def test_IdentityInterface_inputs():
    input_map = dict()
    inputs = IdentityInterface.input_spec()

    for key, metadata in input_map.items():
        for metakey, value in metadata.items():
            yield assert_equal, getattr(inputs.traits()[key], metakey), value
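For reference, IdentityInterface is a plain pass-through: every name listed in fields becomes both an input and an output trait. A minimal sketch (the field names and values below are illustrative):

from nipype import Node
from nipype.interfaces.utility import IdentityInterface

# Every declared field is exposed as both an input and an output.
info = Node(IdentityInterface(fields=['subject_id', 'session_id']), name='info')
info.inputs.subject_id = 'sub-01'
info.inputs.session_id = 'ses-01'

res = info.run()
print(res.outputs.subject_id)  # passed through unchanged -> 'sub-01'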
Example #2
 def a_pipeline(self):
     pipeline = self.new_pipeline(
         name='a_pipeline',
         inputs=[FilesetSpec('input_fileset', nifti_gz_format)],
         outputs=[FilesetSpec('output_fileset', nifti_gz_format)],
         desc=("A dummy pipeline used to test dicom-to-nifti "
               "conversion method"),
         references=[])
     identity = pipeline.create_node(IdentityInterface(['field']),
                                     name='identity')
     # Connect inputs
     pipeline.connect_input('input_fileset', identity, 'field')
     # Connect outputs
     pipeline.connect_output('output_fileset', identity, 'field')
     return pipeline
Example #3
 def pipeline5(self, **name_maps):
     pipeline = self.new_pipeline(
         'pipeline5',
         desc="",
         citations=[],
         name_maps=name_maps)
     identity = pipeline.add('identity', IdentityInterface(['a']))
     pipeline.connect_input('required', identity, 'a')
     if self.branch('branch', 'foo'):
         pipeline.connect_output('requires_foo', identity, 'a')
     elif self.branch('branch', 'bar'):
         pipeline.connect_output('requires_bar', identity, 'a')
     else:
         self.unhandled_branch('branch')
     return pipeline
def create_images_workflow():
    """ Correct for the sphinx position and use reorient to standard.
    """
    workflow = Workflow(name='minimal_proc')

    inputs = Node(IdentityInterface(fields=[
        'images',
    ]), name="in")
    outputs = Node(IdentityInterface(fields=[
        'images',
    ]), name="out")

    sphinx = MapNode(fs.MRIConvert(sphinx=True, ),
                     iterfield=['in_file'],
                     name='sphinx')

    workflow.connect(inputs, 'images', sphinx, 'in_file')

    ro = MapNode(fsl.Reorient2Std(), iterfield=['in_file'], name='ro')

    workflow.connect(sphinx, 'out_file', ro, 'in_file')
    workflow.connect(ro, 'out_file', outputs, 'images')

    return workflow
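A hypothetical sketch of wiring this sub-workflow into a parent workflow through its 'in' and 'out' identity nodes; the base directory and file name are made up:

from nipype import Node, Workflow
from nipype.interfaces.utility import IdentityInterface

parent = Workflow(name='parent', base_dir='/tmp/minimal_proc_work')  # made-up path
src = Node(IdentityInterface(fields=['images']), name='src')
src.inputs.images = ['sub-01_T1w.nii.gz']  # made-up file name

images_wf = create_images_workflow()
# Sub-workflow fields are addressed as '<node name>.<field name>'.
parent.connect(src, 'images', images_wf, 'in.images')
# parent.run() would apply the sphinx correction and reorientation to each image.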
Example #5
def get_subjects_node(bids_dir, subject_list=None):
    """
    Returns a node with an iterable field "subject" containing a list of subjects,
    which can either be passed in "subject_list" or
    acquired using :py:class:`BIDSLayout` from :py:mod:`pybids`

    :param bids_dir: bids directory to search
    :param subject_list: *optional* list of subjects (instead of searching with :py:class:`BIDSLayout`)
    :return: A :py:mod:`nipype` node
    """
    subjects = pe.Node(IdentityInterface(fields=['subject']), name='subjects')
    if subject_list is None:
        subject_list = layout.BIDSLayout(bids_dir).get_subjects()
    subjects.iterables = ('subject', subject_list)
    return subjects
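A hedged usage sketch: any node connected to the 'subject' field is expanded once per subject; the BIDS path and helper function below are hypothetical:

import nipype.pipeline.engine as pe
from nipype.interfaces.utility import Function

def make_label(subject):
    return 'sub-' + subject

subjects = get_subjects_node('/data/bids_dataset', subject_list=['01', '02'])
labeler = pe.Node(Function(input_names=['subject'], output_names=['label'],
                           function=make_label), name='labeler')

wf = pe.Workflow(name='subject_iteration', base_dir='/tmp/subject_iteration')
wf.connect(subjects, 'subject', labeler, 'subject')
# wf.run() expands 'labeler' into one copy per entry of the 'subject' iterable.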
Example #6
 def test_archive_roundtrip(self):
     study = DummyStudy(
         self.STUDY_NAME, self.archive, runner=LinearRunner('a_dir'),
         inputs=[DatasetMatch('source1', nifti_gz_format, 'source1'),
                 DatasetMatch('source2', nifti_gz_format, 'source2'),
                 DatasetMatch('source3', nifti_gz_format, 'source3'),
                 DatasetMatch('source4', nifti_gz_format, 'source4')])
     # TODO: Should test out other file formats as well.
     source_files = [study.input(n)
                     for n in ('source1', 'source2', 'source3',
                               'source4')]
     sink_files = [study.bound_data_spec(n)
                   for n in ('sink1', 'sink3', 'sink4')]
     inputnode = pe.Node(IdentityInterface(['subject_id', 'visit_id']),
                         'inputnode')
     inputnode.inputs.subject_id = self.SUBJECT
     inputnode.inputs.visit_id = self.VISIT
     source = self.archive.source(source_files,
                                  study_name=self.STUDY_NAME)
     sink = self.archive.sink(sink_files, study_name=self.STUDY_NAME)
     sink.inputs.name = 'archive_sink'
     sink.inputs.desc = (
         "A test session created by archive roundtrip unittest")
     # Create workflow connecting them together
     workflow = pe.Workflow('source_sink_unit_test', base_dir=self.work_dir)
     workflow.add_nodes((source, sink))
     workflow.connect(inputnode, 'subject_id', source, 'subject_id')
     workflow.connect(inputnode, 'visit_id', source, 'visit_id')
     workflow.connect(inputnode, 'subject_id', sink, 'subject_id')
     workflow.connect(inputnode, 'visit_id', sink, 'visit_id')
     for source_file in source_files:
         if not source_file.name.endswith('2'):
             source_name = source_file.name
             sink_name = source_name.replace('source', 'sink')
             workflow.connect(
                 source, source_name + PATH_SUFFIX,
                 sink, sink_name + PATH_SUFFIX)
     workflow.run()
     # Check local directory was created properly
     outputs = [
         f for f in sorted(os.listdir(self.session_dir))
         if f != FIELDS_FNAME]
     self.assertEqual(outputs,
                      [self.STUDY_NAME + '_sink1.nii.gz',
                       self.STUDY_NAME + '_sink3.nii.gz',
                       self.STUDY_NAME + '_sink4.nii.gz',
                       'source1.nii.gz', 'source2.nii.gz',
                       'source3.nii.gz', 'source4.nii.gz'])
Example #7
 def pipeline(self):
     pipeline = self.create_pipeline(
         name='pipeline',
         inputs=[DatasetSpec('input_dataset', nifti_gz_format)],
         outputs=[DatasetSpec('output_dataset', nifti_gz_format)],
         desc=("A dummy pipeline used to test dicom-to-nifti "
                      "conversion method"),
         version=1,
         citations=[])
     identity = pipeline.create_node(IdentityInterface(['field']),
                                     name='identity')
     # Connect inputs
     pipeline.connect_input('input_dataset', identity, 'field')
     # Connect outputs
     pipeline.connect_output('output_dataset', identity, 'field')
     return pipeline
Example #8
 def test_repository_roundtrip(self):
     study = DummyStudy(self.STUDY_NAME,
                        self.repository,
                        processor=LinearProcessor('a_dir'),
                        inputs=[
                            FilesetSelector('source1', text_format,
                                            'source1'),
                            FilesetSelector('source2', text_format,
                                            'source2'),
                            FilesetSelector('source3', text_format,
                                            'source3'),
                            FilesetSelector('source4', text_format,
                                            'source4')
                        ])
     # TODO: Should test out other file formats as well.
     source_files = ('source1', 'source2', 'source3', 'source4')
     sink_files = ('sink1', 'sink3', 'sink4')
     inputnode = pe.Node(IdentityInterface(['subject_id', 'visit_id']),
                         'inputnode')
     inputnode.inputs.subject_id = self.SUBJECT
     inputnode.inputs.visit_id = self.VISIT
     source = study.source(source_files)
     sink = study.sink(sink_files)
     sink.inputs.name = 'repository_sink'
     sink.inputs.desc = (
         "A test session created by repository roundtrip unittest")
     # Create workflow connecting them together
     workflow = pe.Workflow('source_sink_unit_test', base_dir=self.work_dir)
     workflow.add_nodes((source, sink))
     workflow.connect(inputnode, 'subject_id', source, 'subject_id')
     workflow.connect(inputnode, 'visit_id', source, 'visit_id')
     workflow.connect(inputnode, 'subject_id', sink, 'subject_id')
     workflow.connect(inputnode, 'visit_id', sink, 'visit_id')
     for source_name in source_files:
         if not source_name.endswith('2'):
             sink_name = source_name.replace('source', 'sink')
             workflow.connect(source, source_name + PATH_SUFFIX, sink,
                              sink_name + PATH_SUFFIX)
     workflow.run()
     # Check local directory was created properly
     outputs = [
         f for f in sorted(
             os.listdir(self.get_session_dir(from_study=self.STUDY_NAME)))
         if not (f == DirectoryRepository.FIELDS_FNAME)
     ]
     self.assertEqual(outputs,
                      ['.derived', 'sink1.txt', 'sink3.txt', 'sink4.txt'])
Example #9
def index_lesion_workflow(msid, mseid, lesion):
    import nipype.interfaces.ants as ants
    from nipype.pipeline.engine import Node, Workflow, MapNode
    from nipype.interfaces.io import DataSink, DataGrabber
    from nipype.interfaces.utility import IdentityInterface, Function
    import nipype.interfaces.fsl as fsl
    from nipype.utils.filemanip import load_json
    import os  # needed for os.path.split below

    working_directory = '/working/henry_temp/keshavan/'
    output_directory = os.path.split(lesion)[0]

    register = Workflow(name="indexed_lesion_{0}_{1}".format(msid, mseid))
    register.base_dir = working_directory
    inputnode = Node(IdentityInterface(fields=["lesion"]), name="inputspec")
    inputnode.inputs.lesion = lesion

    bin_math = Node(fsl.BinaryMaths(), name="Convert_to_binary")
    bin_math.inputs.operand_value = 1
    bin_math.inputs.operation = 'min'
    register.connect(inputnode, "lesion", bin_math, "in_file")

    cluster_lesion = Node(fsl.Cluster(threshold=0.0001,
                                      out_index_file=True,
                                      use_mm=True),
                          name="cluster_lesion")

    sinker = Node(DataSink(), name="sinker")
    sinker.inputs.base_directory = output_directory
    sinker.inputs.container = '.'
    sinker.inputs.substitutions = [('_maths', '')]

    register.connect(bin_math, "out_file", cluster_lesion, "in_file")
    register.connect(cluster_lesion, "index_file", sinker, "@cluster")

    from nipype.interfaces.freesurfer import SegStats
    segstats_lesion = Node(SegStats(), name="segstats_lesion")
    register.connect(cluster_lesion, "index_file", segstats_lesion,
                     "segmentation_file")
    register.connect(segstats_lesion, "summary_file", sinker, "@summaryfile")

    register.write_graph(graph2use='orig')
    register.config["execution"] = {
        "keep_inputs": True,
        "remove_unnecessary_outputs": False
    }
    return register
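A hypothetical call, with made-up MS identifiers and lesion path:

wf = index_lesion_workflow('ms0001', 'mse0001',
                           '/data/lesions/ms0001/mse0001/lesion.nii.gz')
wf.run()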
Example #10
 def pipeline2(self):
     pipeline = self.create_pipeline(
         'pipeline2',
         inputs=[
             DatasetSpec('required', text_format),
             DatasetSpec('optional', text_format)
         ],
         outputs=[DatasetSpec('missing_input', text_format)],
         desc="",
         citations=[],
         version=1)
     identity = pipeline.create_node(IdentityInterface(['a', 'b']),
                                     'identity')
     pipeline.connect_input('required', identity, 'a')
     pipeline.connect_input('optional', identity, 'b')
     pipeline.connect_output('missing_input', identity, 'a')
     return pipeline
    def create(self):  #, **kwargs):
        """ Create the nodes and connections for the workflow """
        # Preamble
        csvReader = CSVReader()
        csvReader.inputs.in_file = self.csv_file.default_value
        csvReader.inputs.header = self.hasHeader.default_value
        csvOut = csvReader.run()

        iters = {}
        label = list(csvOut.outputs.__dict__.keys())[0]
        result = getattr(csvOut.outputs, label)
        iters['tests'], iters['samples'] = sample_test_lists(
            result, self.sample_size.default_value)
        # Main event
        out_fields = ['T1', 'T2', 'Label', 'sampleindex', 'testindex']
        inputs = Node(interface=IdentityInterface(fields=out_fields),
                      run_without_submitting=True,
                      name='inputs')
        inputs.iterables = [('sampleindex', iters['samples']),
                            ('testindex', iters['tests'])]
        if not self.hasHeader.default_value:
            inputs.inputs.T1 = csvOut.outputs.column_0
            inputs.inputs.Label = csvOut.outputs.column_1
            inputs.inputs.T2 = csvOut.outputs.column_2
        else:
            pass  #TODO
        metaflow = Workflow(name='metaflow')
        metaflow.add_nodes([inputs])
        fusionflow = FusionLabelWorkflow()
        self.connect([
            (metaflow, fusionflow, [('inputs.sampleindex', 'sampleT1s.index'),
                                    ('inputs.T1', 'sampleT1s.inlist')]),
            (metaflow, fusionflow, [('inputs.sampleindex', 'sampleT2s.index'),
                                    ('inputs.T2', 'sampleT2s.inlist')]),
            (metaflow, fusionflow, [('inputs.sampleindex',
                                     'sampleLabels.index'),
                                    ('inputs.Label', 'sampleLabels.inlist')]),
            (metaflow, fusionflow, [('inputs.testindex', 'testT1s.index'),
                                    ('inputs.T1', 'testT1s.inlist')]),
            (metaflow, fusionflow, [('inputs.testindex', 'testT2s.index'),
                                    ('inputs.T2', 'testT2s.inlist')]),
            (metaflow, fusionflow, [('inputs.testindex', 'testLabels.index'),
                                    ('inputs.Label', 'testLabels.inlist')])
        ])
Example #12
    def outputnode(self, frequency):
        """
        Generates an output node for the given frequency. It also adds implicit
        file format conversion nodes to the pipeline.

        Parameters
        ----------
        frequency : str
            The frequency (i.e. 'per_session', 'per_visit', 'per_subject' or
            'per_study') of the output node to retrieve
        """
        # Check to see whether there are any outputs for the given frequency
        outputs = list(self.frequency_outputs(frequency))
        if not outputs:
            raise ArcanaError(
                "No outputs to '{}' pipeline for requested frequency '{}'".
                format(self.name, frequency))
        # Get list of output names for the requested frequency, adding fields
        # to hold iterator IDs
        output_names = [o.name for o in outputs]
        # Generate output node and connect it to appropriate nodes
        outputnode = self.add('{}_outputnode'.format(frequency),
                              IdentityInterface(fields=output_names))
        # Loop through list of nodes connected to study data specs and
        # connect them to the newly created output node
        for output in outputs:  # @ReservedAssignment
            conv_cache = {}
            (node, node_out, format, conv_kwargs) = self._output_conns[
                output.name]  # @ReservedAssignment @IgnorePep8
            # If fileset formats differ between study and pipeline
            # outputs create converter node (if one hasn't been already)
            # and connect output to that before connecting to outputnode
            if self.requires_conversion(output, format):
                if format.name not in conv_cache:
                    conv_cache[format.name] = output.format.converter_from(
                        format, **conv_kwargs)
                (conv_node, conv_in,
                 conv_out) = conv_cache[format.name].get_node(
                     '{}_{}_{}_to_{}_conversion'.format(
                         self.name, output.name, output.format.name,
                         format.name))
                self.connect(node, node_out, conv_node, conv_in)
                self.connect(conv_node, conv_out, outputnode, output.name)
            else:
                self.connect(node, node_out, outputnode, output.name)
        return outputnode
Example #13
def test_parameterize_dirs_false(tmpdir):
    from ....interfaces.utility import IdentityInterface
    from ....testing import example_data

    input_file = example_data('fsl_motion_outliers_fd.txt')

    n1 = pe.Node(EngineTestInterface(), name='Node1')
    n1.iterables = ('input_file', (input_file, input_file))
    n1.interface.inputs.input1 = 1

    n2 = pe.Node(IdentityInterface(fields='in1'), name='Node2')

    wf = pe.Workflow(name='Test')
    wf.base_dir = str(tmpdir)
    wf.config['execution']['parameterize_dirs'] = False
    wf.connect([(n1, n2, [('output1', 'in1')])])

    wf.run()
def run(output_dir: str, pipeline_name: str, group_corr_mat: str,
        group_conf_summary: str):
    workflow = Workflow(name="test_workflow", base_dir=output_dir)
    identity_node = Node(IdentityInterface(fields=[
        "pipeline", "group_corr_mat", "distance_matrix", "group_conf_summary"
    ]),
                         name="SomeInputSource")
    identity_node.inputs.pipeline = load_pipeline_from_json(
        get_pipeline_path(pipeline_name))
    identity_node.inputs.group_corr_mat = group_corr_mat
    identity_node.inputs.distance_matrix = get_distance_matrix_file_path()
    identity_node.inputs.group_conf_summary = group_conf_summary
    quality_node = Node(QualityMeasures(output_dir=output_dir),
                        name="QualityMeasures")
    workflow.connect([(identity_node, quality_node,
                       [("pipeline", "pipeline"),
                        ("group_corr_mat", "group_corr_mat"),
                        ("distance_matrix", "distance_matrix"),
                        ("group_conf_summary", "group_conf_summary")])])
    workflow.run()
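A hypothetical invocation; the pipeline name and paths are made up and depend on the surrounding package's pipeline definitions:

run(output_dir='/tmp/quality_measures',
    pipeline_name='pipeline-24HMP_aCompCor_SpikeReg',  # hypothetical pipeline name
    group_corr_mat='/data/derivatives/group_corr_mat.npy',
    group_conf_summary='/data/derivatives/group_conf_summary.tsv')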
Example #15
        def test_nipype_srtm_zhou2003(self):
            infosource = Node(IdentityInterface(fields=['in_file']),
                              name="infosource")
            infosource.iterables = ('in_file', [self.pet4D_file])

            km = Node(KineticModel(model='SRTM_Zhou2003',
                                   #timeSeriesImgFile=self.pet4D_file,
                                   frameTimingFile=self.timing_file,
                                   refRegionMaskFile=self.refRegionMaskFile,
                                   startActivity=self.startActivity,
                                   weights=self.weights,
                                   fwhm=self.fwhm), name="km")

            km_workflow = Workflow(name="km_workflow",
                                   base_dir=self.tmpdirname)
            km_workflow.connect([
                (infosource, km, [('in_file', 'timeSeriesImgFile')])
            ])

            km_workflow.run()
Example #16
 def pipeline3(self, **kwargs):
     outputs = [DatasetSpec('another_derivable', text_format)]
     switch = self.pre_option('switch', 'pipeline3', **kwargs)
     if switch:
         outputs.append(DatasetSpec('wrong_option', text_format))
     pipeline = self.create_pipeline(
         'pipeline3',
         inputs=[DatasetSpec('required', text_format)],
         outputs=outputs,
         desc="",
         citations=[],
         version=1)
     identity = pipeline.create_node(IdentityInterface(['a', 'b']),
                                     'identity')
     pipeline.connect_input('required', identity, 'a')
     pipeline.connect_input('required', identity, 'b')
     pipeline.connect_output('another_derivable', identity, 'a')
     if switch:
         pipeline.connect_output('wrong_option', identity, 'b')
     return pipeline
Example #17
def create_workflow_hrfpattern_3T(glm='spm'):
    input_node = Node(IdentityInterface(fields=[
        'bold',
        'events',
    ]),
                      name='input')

    w = Workflow('hrf_3T')

    w_preproc = create_workflow_preproc_spm()
    if glm == 'spm':
        w_hrfpattern = create_workflow_hrfpattern_spm()
    elif glm == 'fsl':
        w_hrfpattern = create_workflow_hrfpattern_fsl()

    w.connect(input_node, 'bold', w_preproc, 'input.bold')
    w.connect(input_node, 'events', w_hrfpattern, 'input.events')
    w.connect(w_preproc, 'realign.realigned_files', w_hrfpattern, 'input.bold')

    return w
Example #18
def test_node_joinsource():
    """Test setting the joinsource to a Node."""
    cwd = os.getcwd()
    wd = mkdtemp()
    os.chdir(wd)

    # Make the workflow.
    wf = pe.Workflow(name='test')
    # the iterated input node
    inputspec = pe.Node(IdentityInterface(fields=['n']), name='inputspec')
    inputspec.iterables = [('n', [1, 2])]
    # the join node
    join = pe.JoinNode(SetInterface(), joinsource=inputspec,
        joinfield='input1', name='join')

    # the joinsource is the inputspec name
    assert_equal(join.joinsource, inputspec.name,
        "The joinsource is not set to the node name.")

    os.chdir(cwd)
    rmtree(wd)
def create_workflow():
    workflow = Workflow(name='transform_manual_mask')

    inputs = Node(IdentityInterface(fields=[
        'subject_id',
        'session_id',
        'manualmask',
        'manualmask_func_ref',
        'funcs',
    ]),
                  name='in')

    # Find the transformation matrix func_ref -> func
    # First find transform from func to manualmask's ref func
    findtrans = MapNode(fsl.FLIRT(), iterfield=['in_file'], name='findtrans')

    # Invert the matrix transform
    invert = MapNode(
        fsl.ConvertXFM(invert_xfm=True),
        name='invert',
        iterfield=['in_file'],
    )
    workflow.connect(findtrans, 'out_matrix_file', invert, 'in_file')

    # Transform the manualmask to be aligned with func
    funcreg = MapNode(
        ApplyXFMRefName(),
        name='funcreg',
        iterfield=['in_matrix_file', 'reference'],
    )

    workflow.connect(inputs, 'funcs', findtrans, 'in_file')
    workflow.connect(inputs, 'manualmask_func_ref', findtrans, 'reference')

    workflow.connect(invert, 'out_file', funcreg, 'in_matrix_file')

    workflow.connect(inputs, 'manualmask', funcreg, 'in_file')
    workflow.connect(inputs, 'funcs', funcreg, 'reference')

    return workflow
Example #20
def create_main_workflow_power(fif_files):

    import nipype.pipeline.engine as pe
    from nipype.interfaces.utility import IdentityInterface
    from neuropype_ephy.interfaces.mne.power import Power
    import nipype.interfaces.io as nio
    power_analysis_name = 'test'
    main_workflow = pe.Workflow(name=power_analysis_name)
    main_workflow.base_dir = '/media/dmalt/SSD500/'

    data_source = pe.Node(interface=IdentityInterface(fields=['fif_files']),
                          name='data_source')
    data_source.iterables = [('fif_files', fif_files)]

    ## Info source
    power_node = pe.Node(interface=Power(), name='pwr')
    power_node.inputs.fmin = 0
    power_node.inputs.fmax = 300
    power_node.inputs.method = 'welch'

    main_workflow.connect(data_source, 'fif_files', power_node, 'epochs_file')
    return main_workflow
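A hypothetical call with made-up .fif paths; the iterable expands the Power node once per file:

fif_files = ['/data/meg/subj01-epo.fif', '/data/meg/subj02-epo.fif']  # made-up paths
wf = create_main_workflow_power(fif_files)
wf.run(plugin='MultiProc', plugin_args={'n_procs': 2})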
Example #21
def infosrc(fif_files):
    '''Create input node.

    Use wildcards to run computations on multiple files;
    to check the expansion, it is a good idea to run an ls command first, like this:


    $ ls ./*/*.fif

    $ neuropype input ./*/*.fif

    '''

    from os.path import abspath, split
    from os.path import commonprefix as cprfx
    from nipype.interfaces.utility import IdentityInterface, Function

    fif_files = [abspath(f) for f in fif_files]

    common_prefix = split(cprfx(fif_files))[0] + '/'
    iter_mapping = dict()
    for fif_file in fif_files:
        new_base = fif_file.replace(common_prefix, '')
        new_base = new_base.replace('/', '__')
        new_base = new_base.replace('.', '-')
        iter_mapping[new_base] = fif_file

    infosource = pe.Node(interface=IdentityInterface(fields=['keys']),
                         name='infosource')

    path_node = pe.Node(interface=Function(input_names=['key', 'iter_mapping'],
                                           output_names=['path'],
                                           function=map_path),
                        name='path_node')

    infosource.iterables = [('keys', list(iter_mapping.keys()))]
    path_node.inputs.iter_mapping = iter_mapping
    return infosource, path_node
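A hedged sketch of wiring the two returned nodes together; the glob pattern is illustrative and map_path is assumed to be defined in the surrounding module:

import glob
import nipype.pipeline.engine as pe

fif_files = glob.glob('./*/*.fif')
infosource, path_node = infosrc(fif_files)

wf = pe.Workflow(name='per_file_processing', base_dir='/tmp/per_file_work')
wf.connect(infosource, 'keys', path_node, 'key')
# Each iterated key is mapped back to its original .fif path by path_node.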
Example #22
def wf_transform_anat(in_file_list, in_matrix_file_list, reference):
    func2std_xform = MapNode(
        FLIRT(output_type='NIFTI', apply_xfm=True),
        name="func2std_xform",
        iterfield=['in_file', 'in_matrix_file', 'reference'])

    inputspec = Node(IdentityInterface(
        fields=['in_file_list', 'in_matrix_file_list', 'reference']),
                     name="inputspec")

    inputspec.inputs.in_file_list = in_file_list
    inputspec.inputs.in_matrix_file_list = in_matrix_file_list
    inputspec.inputs.reference = reference

    wf_transform_anat = Workflow(name="wf_transform_anat")
    wf_transform_anat.connect(inputspec, 'in_file_list', func2std_xform,
                              'in_file')
    wf_transform_anat.connect(inputspec, 'in_matrix_file_list', func2std_xform,
                              'in_matrix_file')
    wf_transform_anat.connect(inputspec, 'reference', func2std_xform,
                              'reference')

    return wf_transform_anat
def run(output_dir: str):
    workflow = Workflow(name="test_workflow", base_dir=output_dir)
    identity_node = Node(IdentityInterface(fields=[
        "edges_weight", "edges_weight_clean", "fc_fd_corr_values",
        "fc_fd_corr_values_clean", "fc_fd_summary", "task"
    ]),
                         name="SomeInputSource")
    identity_node.inputs.edges_weight = edges_weight
    identity_node.inputs.edges_weight_clean = edges_weight_clean
    identity_node.inputs.fc_fd_summary = fc_fd_summary
    identity_node.inputs.task = task
    identity_node.inputs.fc_fd_corr_values = fc_fd_corr_values
    identity_node.inputs.fc_fd_corr_values_clean = fc_fd_corr_values_clean
    quality_node = Node(PipelinesQualityMeasures(output_dir=output_dir),
                        name="PipelinesQualityMeasures")
    workflow.connect([(identity_node, quality_node, [
        ("edges_weight_clean", "edges_weight_clean"),
        ("edges_weight", "edges_weight"),
        ("fc_fd_corr_values", "fc_fd_corr_values"),
        ("fc_fd_corr_values_clean", "fc_fd_corr_values_clean"),
        ("fc_fd_summary", "fc_fd_summary"), ("task", "task")
    ])])
    workflow.run()
Example #24
    def create(self):
        """
        Create and connect the nodes of the label-fusion workflow.
        """
        trainT1s = Node(interface=Select(), name="trainT1s")
        trainT2s = Node(interface=Select(), name="trainT2s")
        trainLabels = Node(interface=Select(), name="trainLabels")
        testT1s = Node(interface=Select(), name="testT1s")

        # intensityImages = Node(interface=Merge(2), name='intensityImages')

        jointFusion = Node(interface=JointFusion(), name="jointFusion")
        jointFusion.inputs.num_threads = -1
        jointFusion.inputs.dimension = 3
        jointFusion.inputs.modalities = 1  # TODO: verify 2 for T1/T2
        jointFusion.inputs.method = "Joint[0.1,2]"  # this does not work
        jointFusion.inputs.output_label_image = "fusion_neuro2012_20.nii.gz"

        outputs = Node(
            interface=IdentityInterface(fields=["output_label_image"]),
            run_without_submitting=True,
            name="outputspec",
        )

        self.connect(
            [  # Don't worry about T2s now per Regina
                # (trainT1s, intensityImages, [('out', 'in1')]),
                # (trainT2s, intensityImages, [('out', 'in2')]),
                (testT1s, jointFusion, [("out", "target_image")]),
                (trainT1s, jointFusion, [("out", "warped_intensity_images")]),
                (trainLabels, jointFusion, [("out", "warped_label_images")]),
                (jointFusion, outputs, [("output_label_image", "output_label_image")]),
            ]
        )
Example #25
def create_workflow_spatialobject_7T():
    input_node = Node(IdentityInterface(fields=[
        'bold',
        'events',
        't2star_fov',
        't2star_whole',
        't1w',
    ]),
                      name='input')

    coreg_tstat = MapNode(interface=FLIRT(),
                          name='realign_result_to_anat',
                          iterfield=[
                              'in_file',
                          ])
    coreg_tstat.inputs.apply_xfm = True

    w = Workflow('spatialobject_7T')

    w_preproc = create_workflow_preproc_spm()
    w_spatialobject = create_workflow_spatialobject_fsl()
    w_coreg = create_workflow_coreg_epi2t1w()

    w.connect(input_node, 'bold', w_preproc, 'input.bold')
    w.connect(input_node, 'events', w_spatialobject, 'input.events')
    w.connect(input_node, 't2star_fov', w_coreg, 'input.t2star_fov')
    w.connect(input_node, 't2star_whole', w_coreg, 'input.t2star_whole')
    w.connect(input_node, 't1w', w_coreg, 'input.t1w')
    w.connect(input_node, 't1w', coreg_tstat, 'reference')
    w.connect(w_preproc, 'realign.realigned_files', w_spatialobject,
              'input.bold')
    w.connect(w_preproc, 'realign.mean_image', w_coreg, 'input.bold_mean')

    w.connect(w_spatialobject, 'output.T_image', coreg_tstat, 'in_file')
    w.connect(w_coreg, 'output.mat_epi2t1w', coreg_tstat, 'in_matrix_file')

    return w
# create formatter
formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')
# add formatter to ch
ch.setFormatter(formatter)
# add ch to logger
logger.addHandler(ch)

# ### Define Input- and Output-Node

# Inputs:
# -----------
# raw_files : Path-string to the folder containing the raw DICOM files
# subject_folder : Path-string to the folder containing the already processed subject data, i.e. the path where
#                  the results of this step are also stored
inputNode = Node(IdentityInterface(fields=[
    'subID', 'raw_files', 'subject_folder', 'parcellation_mask', 'brainmask'
]),
                 name='inputNode')

outputNode = Node(IdentityInterface(fields=['mat_file']), name='output_node')

# ### Define Filenames

fileNames = {
    'bold_file': 'bold.nii.gz',
    'parcellation_2_func': 'parc_2_func.nii.gz',
    'func_2_anat': 'exfunc2anat_6DOF.nii.gz',
    'func_2_anat_mat': 'exfunc2anat_6DOF.mat',
    'anat_2_func_mat': 'anat2exfunc_6DOF.mat',
    'segstat_sum_file': 'segstat_summary.txt',
    'avgwf_file': 'segstat_average_ROI_timeseries.dat'
}
data_dir = os.path.abspath('/data/pt_nmc002/other/narps/derivatives/')
out_dir = os.path.abspath(
    '/data/pt_nmc002/other/narps/derivatives/second_lev_equal_range/')
deriv_dir = os.path.join(data_dir, 'first_lev/equalRange')
fwhm = [5]
mask = '/data/pt_nmc002/other/narps/derivatives/fmriprep/gr_mask_tmax.nii'
#template_image = '/data/pt_neunmc024/SpeechInNoise_Dyslexia_Study/MRI_Data_BIDS/derivatives/mni_template/mni1mm.nii'

# list of contrast identifiers
contrasts = ['con_0001', 'con_0002', 'con_0003']

# collect all the con images for each contrast.
contrast_ids = list(range(1, len(contrasts) + 1))

# Infosource - a function free node to iterate over the list of subject names
infosource = Node(IdentityInterface(fields=['contrast_id']), name="infosource")
infosource.iterables = [('contrast_id', contrasts)]

# Select files from derivatives.

templates = {
    'cons':
    '/data/pt_nmc002/other/narps/derivatives/first_lev/tmp/equalRange/*/contraste_estimate/{contrast_id}.nii'
}
selectderivs = Node(SelectFiles(templates, sort_filelist=True),
                    name='selectderivs')
#selectderivs.inputs.sub_id = subs

# One Sample T-Test Design - creates one sample T-Test Design
onesamplettestdes = Node(
    OneSampleTTestDesign(),
    name="onesamplettestdes")
Example #28
def create_pipeline_source_reconstruction(main_path,
                                          sbj_dir,
                                          pipeline_name='inv_sol_pipeline',
                                          spacing='ico-5',
                                          inv_method='MNE',
                                          is_epoched=False,
                                          events_id=None,
                                          t_min=None,
                                          t_max=None,
                                          is_evoked=False,
                                          parc='aparc',
                                          aseg=False,
                                          aseg_labels=[],
                                          noise_cov_fname=None,
                                          save_stc=False,
                                          save_mixed_src_space=False,
                                          is_fixed=False):
    """
    Description:

        Source reconstruction pipeline

    Inputs:

        main_path : str
            the main path of the workflow
        sbj_dir : str
            Freesurfer directory
        pipeline_name : str (default inv_sol_pipeline)
            name of the pipeline
        spacing : str (default 'ico-5')
            spacing to use to setup a source space
        inv_method : str (default MNE)
            the inverse method to use; possible choices: MNE, dSPM, sLORETA
        is_epoched : bool (default False)
            if True and events_id = None the input data are epoch data
            in the format -epo.fif
            if True and events_id is not None, the raw data are epoched
            according to events_id and t_min and t_max values
        is_fixed : bool (default False)
            if True we use fixed orientation
        events_id: dict (default None)
            the dict of events
        t_min, t_max: int (default None)
            define the time interval in which to epoch the raw data
        is_evoked: bool (default False)
            if True the raw data will be averaged according to the events
            contained in the dict events_id
        parc: str (default 'aparc')
            the parcellation defining the ROIs atlas in the source space
        aseg: bool (default False)
            if True a mixed source space will be created and the sub cortical
            regions defined in aseg_labels will be added to the source space
        aseg_labels: list (default [])
            list of substructures we want to include in the mixed source space
        noise_cov_fname: str (default None)
            template for the path to either the noise covariance matrix file or
            the empty room data
        save_stc: bool (default False)
            if True the stc will be saved
        save_mixed_src_space: bool (default False)
            if True the mixed src space will be saved in the FS folder

    Inputs (inputnode):

        raw : str
            path to raw data in fif format
        sbj_id : str
            subject id

    Outputs:

        pipeline : instance of Workflow

    """

    pipeline = pe.Workflow(name=pipeline_name)
    pipeline.base_dir = main_path

    inputnode = pe.Node(IdentityInterface(fields=['sbj_id', 'raw']),
                        name='inputnode')

    # Lead Field computation Node
    LF_computation = pe.Node(interface=LFComputation(), name='LF_computation')
    LF_computation.inputs.sbj_dir = sbj_dir
    LF_computation.inputs.spacing = spacing
    LF_computation.inputs.aseg = aseg
    if aseg:
        LF_computation.inputs.aseg_labels = aseg_labels
        LF_computation.inputs.save_mixed_src_space = save_mixed_src_space

    pipeline.connect(inputnode, 'sbj_id', LF_computation, 'sbj_id')

    try:
        events_id
    except NameError:
        events_id = None

    if is_epoched and events_id is None:
        pipeline.connect(inputnode, ('raw', get_epochs_info), LF_computation,
                         'raw_info')
    else:
        pipeline.connect(inputnode, ('raw', get_raw_info), LF_computation,
                         'raw_info')

    pipeline.connect(inputnode, 'raw', LF_computation, 'raw_fname')

    # Noise Covariance Matrix Node
    create_noise_cov = pe.Node(interface=NoiseCovariance(),
                               name="create_noise_cov")

    #    if noise_cov_fname is not None:
    create_noise_cov.inputs.cov_fname_in = noise_cov_fname
    create_noise_cov.inputs.is_epoched = is_epoched
    create_noise_cov.inputs.is_evoked = is_evoked
    if is_evoked:
        create_noise_cov.inputs.events_id = events_id
        create_noise_cov.inputs.t_min = t_min
        create_noise_cov.inputs.t_max = t_max

    pipeline.connect(inputnode, 'raw', create_noise_cov, 'raw_filename')

    # Inverse Solution Node
    inv_solution = pe.Node(interface=InverseSolution(), name='inv_solution')

    inv_solution.inputs.sbj_dir = sbj_dir
    inv_solution.inputs.inv_method = inv_method
    inv_solution.inputs.is_epoched = is_epoched
    inv_solution.inputs.is_fixed = is_fixed

    if is_epoched and events_id is not None:
        inv_solution.inputs.events_id = events_id
        inv_solution.inputs.t_min = t_min
        inv_solution.inputs.t_max = t_max

    inv_solution.inputs.is_evoked = is_evoked
    if is_epoched and is_evoked:
        inv_solution.inputs.events_id = events_id

    inv_solution.inputs.parc = parc
    inv_solution.inputs.aseg = aseg
    if aseg:
        inv_solution.inputs.aseg_labels = aseg_labels

    inv_solution.inputs.save_stc = save_stc

    pipeline.connect(inputnode, 'sbj_id', inv_solution, 'sbj_id')
    pipeline.connect(inputnode, 'raw', inv_solution, 'raw_filename')
    pipeline.connect(LF_computation, 'fwd_filename', inv_solution,
                     'fwd_filename')
    pipeline.connect(create_noise_cov, 'cov_fname_out', inv_solution,
                     'cov_filename')

    return pipeline
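A hypothetical usage sketch; the paths and subject id are made up:

pipeline = create_pipeline_source_reconstruction(
    main_path='/data/meg_workflows',        # made-up working directory
    sbj_dir='/data/freesurfer_subjects',    # made-up FreeSurfer directory
    inv_method='dSPM',
    parc='aparc')
pipeline.inputs.inputnode.sbj_id = 'subject01'
pipeline.inputs.inputnode.raw = '/data/meg/subject01-raw.fif'
pipeline.run()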
def run_workflow(session=None, csv_file=None):
    from nipype import config
    #config.enable_debug_mode()

    method = 'fs'  # freesurfer's mri_convert is faster
    if method == 'fs':
        import nipype.interfaces.freesurfer as fs  # freesurfer
    else:
        assert method == 'fsl'
        import nipype.interfaces.fsl as fsl  # fsl

    # ------------------ Specify variables
    ds_root = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))

    data_dir = ds_root
    output_dir = 'derivatives/resampled-isotropic-06mm'
    working_dir = 'workingdirs'

    # ------------------ Input Files
    infosource = Node(IdentityInterface(fields=[
        'subject_id',
        'session_id',
        'datatype',
    ]),
                      name="infosource")

    if csv_file is not None:
        # Read csv and use pandas to set-up image and ev-processing
        df = pd.read_csv(csv_file)
        # init lists
        sub_img = []
        ses_img = []
        dt_img = []

        # fill lists to iterate mapnodes
        for index, row in df.iterrows():
            for dt in row.datatype.strip("[]").split(" "):
                if dt in ['anat']:  # only for anatomicals
                    sub_img.append(row.subject)
                    ses_img.append(row.session)
                    dt_img.append(dt)

        # check if the file definitions are ok
        if len(dt_img) > 0:
            print('There are images to process. Will continue.')
        else:
            print('No images specified. Check your csv-file.')

        infosource.iterables = [('session_id', ses_img),
                                ('subject_id', sub_img), ('datatype', dt_img)]
        infosource.synchronize = True
    else:
        print('No csv-file specified. Cannot continue.')

    # SelectFiles
    templates = {
        'image':
        'sub-{subject_id}/ses-{session_id}/{datatype}/'
        'sub-{subject_id}_ses-{session_id}_*.nii.gz',
    }
    inputfiles = Node(nio.SelectFiles(templates, base_directory=data_dir),
                      name="input_files")

    # ------------------ Output Files
    # Datasink
    outputfiles = Node(nio.DataSink(base_directory=ds_root,
                                    container=output_dir,
                                    parameterization=True),
                       name="output_files")

    # Use the following DataSink output substitutions
    outputfiles.inputs.substitutions = [
        ('subject_id_', 'sub-'),
        ('session_id_', 'ses-'),
        # BIDS Extension Proposal: BEP003
        ('_resample.nii.gz', '_res-06x06x06_preproc.nii.gz'),
        # remove subdirectories:
        ('resampled-isotropic-06mm/isoxfm-06mm', 'resampled-isotropic-06mm'),
        ('resampled-isotropic-06mm/mriconv-06mm', 'resampled-isotropic-06mm'),
    ]
    # Put result into a BIDS-like format
    outputfiles.inputs.regexp_substitutions = [
        # this works only if datatype is specified in input
        (r'_datatype_([a-z]*)_ses-([a-zA-Z0-9]*)_sub-([a-zA-Z0-9]*)',
         r'sub-\3/ses-\2/\1'),
        (r'_fs_iso06mm[0-9]*/', r''),
        (r'/_ses-([a-zA-Z0-9]*)_sub-([a-zA-Z0-9]*)', r'/sub-\2/ses-\1/'),
        # stupid hacks for when datatype is not specified
        (r'//(sub-[^/]*_bold_res-.*)', r'/func/\1'),
        (r'//(sub-[^/]*_phasediff_res-.*.nii.gz)', r'/fmap/\1'),
        (r'//(sub-[^/]*_magnitude1_res-.*.nii.gz)', r'/fmap/\1'),
        (r'//(sub-[^/]*_epi_res-.*.nii.gz)', r'/fmap/\1'),
        (r'//(sub-[^/]*_T1w_res-.*.nii.gz)', r'/anat/\1'),
        (r'//(sub-[^/]*_T2w_res-.*.nii.gz)', r'/anat/\1'),
        (r'//(sub-[^/]*_dwi_res-.*.nii.gz)', r'/dwi/\1'),
    ]

    # -------------------------------------------- Create Pipeline
    isotropic_flow = Workflow(name='resample_isotropic06mm',
                              base_dir=os.path.join(ds_root, working_dir))

    isotropic_flow.connect([(infosource, inputfiles, [
        ('subject_id', 'subject_id'),
        ('session_id', 'session_id'),
        ('datatype', 'datatype'),
    ])])

    # --- Convert to 0.6 mm isotropic voxels

    if method == 'fs':
        fs_iso06mm = MapNode(
            fs.Resample(
                voxel_size=(0.6, 0.6, 0.6),
                # suffix is not accepted by fs.Resample
                # suffix='_res-1x1x1_preproc',
                # BIDS Extension Proposal: BEP003
            ),
            name='fs_iso06mm',
            iterfield=['in_file'],
        )

        isotropic_flow.connect(inputfiles, 'image', fs_iso06mm, 'in_file')
        isotropic_flow.connect(fs_iso06mm, 'resampled_file', outputfiles,
                               'mriconv-06mm')
    elif method == 'fsl':
        # in_file --> out_file
        isoxfm = Node(fsl.FLIRT(apply_isoxfm=0.6, ), name='isoxfm')

        isotropic_flow.connect(inputfiles, 'image', isoxfm, 'in_file')
        isotropic_flow.connect(inputfiles, 'image', isoxfm, 'reference')
        isotropic_flow.connect(isoxfm, 'out_file', outputfiles, 'isoxfm-06mm')

    isotropic_flow.stop_on_first_crash = False  # True
    isotropic_flow.keep_inputs = True
    isotropic_flow.remove_unnecessary_outputs = False
    isotropic_flow.write_graph()
    outgraph = isotropic_flow.run()
Example #30
def rest_noise_filter_wf(wf_name='rest_noise_removal'):
    """ Create a resting-state fMRI noise removal node.

    Nipype Inputs
    -------------
    rest_noise_input.in_file

    rest_noise_input.brain_mask

    rest_noise_input.wm_mask

    rest_noise_input.csf_mask

    rest_noise_input.motion_params
        Nipy motion parameters.

    Nipype Outputs
    --------------
    rest_noise_output.tsnr_file
        A SNR estimation volume file for QA purposes.

    rest_noise_output.motion_corrected
        The fMRI motion corrected image.

    rest_noise_output.nuis_corrected
        The resulting nuisance corrected image.
        This will be the same as 'motion_corrected' if compcor
        is disabled.

    rest_noise_output.motion_regressors
        Motion regressors file.

    rest_noise_output.compcor_regressors
        CompCor regressors file.

    rest_noise_output.art_displacement_files
        One image file containing the voxel-displacement timeseries.

    rest_noise_output.art_intensity_files
        One file containing the global intensity values determined
        from the brainmask.

    rest_noise_output.art_norm_files
        One file containing the composite norm.

    rest_noise_output.art_outlier_files
         One file containing a list of 0-based indices corresponding
         to outlier volumes.

    rest_noise_output.art_plot_files
        One image file containing the detected outliers.

    rest_noise_output.art_statistic_files
        One file containing information about the different types of
        artifacts and if design info is provided then details of
        stimulus correlated motion and a listing of artifacts by
        event type.

    Returns
    -------
    rm_nuisance_wf: nipype Workflow
    """

    # Create the workflow object
    wf = pe.Workflow(name=wf_name)

    in_fields = [
        "in_file",
        "brain_mask",
        "wm_mask",
        "csf_mask",
        "motion_params",
    ]

    out_fields = [
        "tsnr_file",
        "motion_corrected",
        "nuis_corrected",
        "motion_regressors",
        "compcor_regressors",
        "gsr_regressors",
        "art_displacement_files",
        "art_intensity_files",
        "art_norm_files",
        "art_outlier_files",
        "art_plot_files",
        "art_statistic_files",
    ]

    # input identities
    rest_noise_input = setup_node(IdentityInterface(fields=in_fields,
                                                    mandatory_inputs=True),
                                  name="rest_noise_input")

    # get the settings for filters
    filters = _get_params_for('rest_filter')

    # Compute TSNR on realigned data regressing polynomial up to order 2
    tsnr = setup_node(TSNR(regress_poly=2), name='tsnr')

    # Use :class:`nipype.algorithms.rapidart` to determine which of the
    # images in the functional series are outliers based on deviations in
    # intensity or movement.
    art = setup_node(rapidart_fmri_artifact_detection(),
                     name="detect_artifacts")

    # Compute motion regressors
    motion_regs = setup_node(Function(
        input_names=[
            'motion_params',
            'order',
            'derivatives',
        ],
        output_names=['out_files'],
        function=motion_regressors,
    ),
                             name='motion_regressors')

    # Create a filter to remove motion and art confounds
    motart_pars = setup_node(Function(
        input_names=['motion_params', 'comp_norm', 'outliers', 'detrend_poly'],
        output_names=['out_files'],
        function=create_regressors),
                             name='motart_parameters')

    motion_filter = setup_node(fsl.GLM(out_f_name='F_mcart.nii.gz',
                                       out_pf_name='pF_mcart.nii.gz',
                                       demean=True),
                               name='motion_filter')

    # Noise confound regressors
    compcor_pars = setup_node(Function(
        input_names=[
            'realigned_file', 'mask_file', 'num_components', 'extra_regressors'
        ],
        output_names=['out_files'],
        function=extract_noise_components,
    ),
                              name='compcor_pars')
    #compcor_pars = setup_node(ACompCor(), name='compcor_pars')
    #compcor_pars.inputs.components_file = 'noise_components.txt'

    compcor_filter = setup_node(fsl.GLM(out_f_name='F.nii.gz',
                                        out_pf_name='pF.nii.gz',
                                        demean=True),
                                name='compcor_filter')

    # Global signal regression
    gsr_pars = setup_node(Function(
        input_names=[
            'realigned_file', 'mask_file', 'num_components', 'extra_regressors'
        ],
        output_names=['out_files'],
        function=extract_noise_components,
    ),
                          name='gsr_pars')

    gsr_filter = setup_node(fsl.GLM(out_f_name='F_gsr.nii.gz',
                                    out_pf_name='pF_gsr.nii.gz',
                                    demean=True),
                            name='gsr_filter')

    # output identities
    rest_noise_output = setup_node(IdentityInterface(fields=out_fields,
                                                     mandatory_inputs=True),
                                   name="rest_noise_output")

    # Connect the nodes
    wf.connect([
        # tsnr
        (rest_noise_input, tsnr, [("in_file", "in_file")]),

        # artifact detection
        (rest_noise_input, art, [
            ("in_file", "realigned_files"),
            ("motion_params", "realignment_parameters"),
            ("brain_mask", "mask_file"),
        ]),

        # calculate motion regressors
        (rest_noise_input, motion_regs, [("motion_params", "motion_params")]),

        # create motion and confound regressors parameters file
        (art, motart_pars, [
            ("norm_files", "comp_norm"),
            ("outlier_files", "outliers"),
        ]),
        (motion_regs, motart_pars, [("out_files", "motion_params")]),

        # motion filtering
        (rest_noise_input, motion_filter, [
            ("in_file", "in_file"),
            (("in_file", rename, "_filtermotart"), "out_res_name"),
        ]),
        (motart_pars, motion_filter, [(("out_files", selectindex, [0]),
                                       "design")]),

        # output
        (tsnr, rest_noise_output, [("tsnr_file", "tsnr_file")]),
        (motart_pars, rest_noise_output, [("out_files", "motion_regressors")]),
        (motion_filter, rest_noise_output, [("out_res", "motion_corrected")]),
        (art, rest_noise_output, [
            ("displacement_files", "art_displacement_files"),
            ("intensity_files", "art_intensity_files"),
            ("norm_files", "art_norm_files"),
            ("outlier_files", "art_outlier_files"),
            ("plot_files", "art_plot_files"),
            ("statistic_files", "art_statistic_files"),
        ]),
    ])

    last_filter = motion_filter

    # compcor filter
    if filters['compcor_csf'] or filters['compcor_wm']:
        wf.connect([
            # calculate compcor regressor and parameters file
            (motart_pars, compcor_pars, [
                (("out_files", selectindex, [0]), "extra_regressors"),
            ]),
            (motion_filter, compcor_pars, [
                ("out_res", "realigned_file"),
            ]),

            # the compcor filter
            (motion_filter, compcor_filter, [
                ("out_res", "in_file"),
                (("out_res", rename, "_cleaned"), "out_res_name"),
            ]),
            (compcor_pars, compcor_filter, [(("out_files", selectindex, [0]),
                                             "design")]),
            #(compcor_pars,     compcor_filter,    [("components_file",  "design")]),
            (rest_noise_input, compcor_filter, [("brain_mask", "mask")]),

            # output
            (compcor_pars, rest_noise_output, [("out_files",
                                                "compcor_regressors")]),
            #(compcor_pars,     rest_noise_output, [("components_file",   "compcor_regressors")]),
        ])
        last_filter = compcor_filter

    # global signal regression
    if filters['gsr']:
        wf.connect([
            # calculate gsr regressors parameters file
            (last_filter, gsr_pars, [("out_res", "realigned_file")]),
            (rest_noise_input, gsr_pars, [("brain_mask", "mask_file")]),

            # the output file name
            (rest_noise_input, gsr_filter, [("brain_mask", "mask")]),
            (last_filter, gsr_filter, [
                ("out_res", "in_file"),
                (("out_res", rename, "_gsr"), "out_res_name"),
            ]),
            (gsr_pars, gsr_filter, [(("out_files", selectindex, [0]), "design")
                                    ]),

            # output
            (gsr_pars, rest_noise_output, [("out_files", "gsr_regressors")]),
        ])
        last_filter = gsr_filter

    # connect the final nuisance correction output node
    wf.connect([
        (last_filter, rest_noise_output, [("out_res", "nuis_corrected")]),
    ])

    if filters['compcor_csf'] and filters['compcor_wm']:
        mask_merge = setup_node(Merge(2), name="mask_merge")
        wf.connect([
            ## the mask for the compcor filter
            (rest_noise_input, mask_merge, [("wm_mask", "in1")]),
            (rest_noise_input, mask_merge, [("csf_mask", "in2")]),
            (mask_merge, compcor_pars, [("out", "mask_file")]),
        ])

    elif filters['compcor_csf']:
        wf.connect([
            ## the mask for the compcor filter
            (rest_noise_input, compcor_pars, [("csf_mask", "mask_file")]),
        ])

    elif filters['compcor_wm']:
        wf.connect([
            ## the mask for the compcor filter
            (rest_noise_input, compcor_pars, [("wm_mask", "mask_file")]),
        ])

    return wf
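A hedged usage sketch, assuming the surrounding package (setup_node, filter settings) is configured; all file names below are made up:

noise_wf = rest_noise_filter_wf('rest_noise_removal')
noise_wf.inputs.rest_noise_input.in_file = 'rest_realigned.nii.gz'
noise_wf.inputs.rest_noise_input.brain_mask = 'brain_mask.nii.gz'
noise_wf.inputs.rest_noise_input.wm_mask = 'wm_mask.nii.gz'
noise_wf.inputs.rest_noise_input.csf_mask = 'csf_mask.nii.gz'
noise_wf.inputs.rest_noise_input.motion_params = 'rest_motion_params.par'
noise_wf.base_dir = '/tmp/rest_noise_work'
noise_wf.run()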
Example #31
def spm_mrpet_preprocessing(wf_name="spm_mrpet_preproc"):
    """ Run the PET pre-processing workflow against the
    gunzip_pet.in_file files.
    It depends on the anat_preproc_workflow, so if this
    has not been run, this function will run it too.

    # TODO: organize the anat2pet hack/condition somehow:
    If anat2pet:
    - SPM12 Coregister T1 and tissues to PET
    - PVC the PET image in PET space
    - SPM12 Warp PET to MNI
    else:
    - SPM12 Coregister PET to T1
    - PVC the PET image in anatomical space
    - SPM12 Warp PET in anatomical space to MNI through the
    `anat_to_mni_warp`.

    Parameters
    ----------
    wf_name: str
        Name of the workflow.

    Nipype Inputs
    -------------
    pet_input.in_file: traits.File
        The raw NIFTI_GZ PET image file

    pet_input.anat: traits.File
        Path to the high-contrast anatomical image.
        Reference file of the warp_field, i.e., the
        anatomical image in its native space.

    pet_input.anat_to_mni_warp: traits.File
        The warp field from the transformation of the
        anatomical image to the standard MNI space.

    pet_input.atlas_anat: traits.File
        The atlas file in anatomical space.

    pet_input.tissues: list of traits.File
        List of tissues files from the New Segment process.
        At least the first 3 tissues must be present.

    Nipype outputs
    --------------
    pet_output.pvc_out: existing file
        The results of the PVC process

    pet_output.brain_mask: existing file
        A brain mask calculated with the tissues file.

    pet_output.coreg_ref: existing file
        The coregistered reference image to PET space.

    pet_output.coreg_others: list of existing files
        List of coregistered files from coreg_pet.apply_to_files

    pet_output.pvc_warped: existing file
        Results from PETPVC normalized to MNI.
        The result of every internal pre-processing step
        is normalized to MNI here.

    pet_output.warp_field: existing files
        Spatial normalization parameters .mat files

    pet_output.gm_norm: existing file
        The output of the grey matter intensity
        normalization process.
        This is the last step in the PET signal correction,
        before registration.

    pet_output.atlas_pet: existing file
        Atlas image warped to PET space.
        If the `atlas_file` option is an existing file and
        `normalize_atlas` is True.

    Returns
    -------
    wf: nipype Workflow
    """
    # specify input and output fields
    in_fields  = ["in_file",
                  "anat",
                  "anat_to_mni_warp",
                  "tissues",]

    out_fields = ["brain_mask",
                  "coreg_others",
                  "coreg_ref",
                  "pvc_warped",
                  "pet_warped", # 'pet_warped' is a dummy entry to keep the fields pattern.
                  "warp_field",
                  "pvc_out",
                  "pvc_mask",
                  "gm_norm",]

    do_atlas, _ = check_atlas_file()
    if do_atlas:
        in_fields  += ["atlas_anat"]
        out_fields += ["atlas_pet" ]

    # input
    pet_input = setup_node(IdentityInterface(fields=in_fields, mandatory_inputs=True),
                           name="pet_input")

    # workflow to perform partial volume correction
    petpvc    = petpvc_workflow(wf_name="petpvc")

    merge_list = setup_node(Merge(4), name='merge_for_unzip')
    gunzipper = pe.MapNode(Gunzip(), name="gunzip", iterfield=['in_file'])

    warp_pet = setup_node(spm_normalize(), name="warp_pet")

    tpm_bbox = setup_node(Function(function=get_bounding_box,
                                   input_names=["in_file"],
                                   output_names=["bbox"]),
                          name="tpm_bbox")
    tpm_bbox.inputs.in_file = spm_tpm_priors_path()

    # output
    pet_output = setup_node(IdentityInterface(fields=out_fields), name="pet_output")

    # Create the workflow object
    wf = pe.Workflow(name=wf_name)

    # check how to perform the registration, to decide how to build the pipeline
    anat2pet = get_config_setting('registration.anat2pet', False)
    if anat2pet:
        wf.connect([
                    # inputs
                    (pet_input, petpvc,     [("in_file", "pvc_input.in_file"),
                                             ("anat",    "pvc_input.reference_file"),
                                             ("tissues", "pvc_input.tissues")]),

                    # gunzip some files for SPM Normalize
                    (petpvc,    merge_list, [("pvc_output.pvc_out",    "in1"),
                                             ("pvc_output.brain_mask", "in2"),
                                             ("pvc_output.gm_norm",    "in3")]),
                    (pet_input, merge_list, [("in_file",               "in4")]),

                    (merge_list, gunzipper, [("out", "in_file")]),

                    # warp the PET PVCed to MNI
                    (petpvc,    warp_pet,   [("pvc_output.coreg_ref", "image_to_align")]),
                    (gunzipper, warp_pet,   [("out_file",             "apply_to_files")]),
                    (tpm_bbox,  warp_pet,   [("bbox",                 "write_bounding_box")]),

                    # output
                    (petpvc,    pet_output, [("pvc_output.pvc_out",      "pvc_out"),
                                             ("pvc_output.brain_mask",   "brain_mask"),
                                             ("pvc_output.coreg_ref",    "coreg_ref"),
                                             ("pvc_output.coreg_others", "coreg_others"),
                                             ("pvc_output.gm_norm",      "gm_norm")]),

                    # output
                    (warp_pet,  pet_output, [("normalized_files",  "pvc_warped"),
                                             ("deformation_field", "warp_field")]),
                   ])
    else: # PET 2 ANAT
        collector  = setup_node(Merge(2), name='merge_for_warp')
        apply_warp = setup_node(spm_apply_deformations(), name="warp_pet")

        wf.connect([
                    # inputs
                    (pet_input, petpvc,     [("in_file", "pvc_input.in_file"),
                                             ("anat",    "pvc_input.reference_file"),
                                             ("tissues", "pvc_input.tissues")]),

                    # gunzip some files for SPM Normalize
                    (petpvc,    merge_list, [("pvc_output.pvc_out",    "in1"),
                                             ("pvc_output.brain_mask", "in2"),
                                             ("pvc_output.gm_norm",    "in3")]),
                    (pet_input, merge_list, [("in_file",               "in4")]),

                    (merge_list, gunzipper, [("out",                   "in_file")]),

                    # warp the PET PVCed to MNI
                    (gunzipper,   collector,   [("out_file",             "in1")]),
                    (petpvc,      collector,   [("pvc_output.coreg_ref", "in2")]),

                    (pet_input,   apply_warp,  [("anat_to_mni_warp", "deformation_file")]),
                    (collector,   apply_warp,  [("out",              "apply_to_files")]),
                    (tpm_bbox,    apply_warp,  [("bbox",             "write_bounding_box")]),

                    # output
                    (petpvc,    pet_output, [("pvc_output.pvc_out",      "pvc_out"),
                                             ("pvc_output.brain_mask",   "brain_mask"),
                                             ("pvc_output.petpvc_mask",  "petpvc_mask"),
                                             ("pvc_output.coreg_ref",    "coreg_ref"),
                                             ("pvc_output.coreg_others", "coreg_others"),
                                             ("pvc_output.gm_norm",      "gm_norm")]),

                    # output
                    (apply_warp,  pet_output, [("normalized_files",  "pvc_warped"),
                                               ("deformation_field", "warp_field")]),
                   ])


    if do_atlas:
        coreg_atlas = setup_node(spm_coregister(cost_function="mi"), name="coreg_atlas")

        # set the registration interpolation to nearest neighbour.
        coreg_atlas.inputs.write_interp = 0
        wf.connect([
                    (pet_input,   coreg_atlas, [("anat",                 "source")]),
                    (petpvc,      coreg_atlas, [("pvc_output.coreg_ref", "target")]),
                    (pet_input,   coreg_atlas, [("atlas_anat",           "apply_to_files")]),
                    (coreg_atlas, pet_output,  [("coregistered_files",   "atlas_pet")]),
        ])

    return wf
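A hypothetical usage sketch, assuming SPM12 and the surrounding configuration are available; file names are made up:

wf = spm_mrpet_preprocessing('spm_mrpet_preproc')
wf.inputs.pet_input.in_file = 'pet.nii.gz'
wf.inputs.pet_input.anat = 't1w.nii.gz'
wf.inputs.pet_input.anat_to_mni_warp = 'y_t1w.nii.gz'
wf.inputs.pet_input.tissues = ['c1t1w.nii.gz', 'c2t1w.nii.gz', 'c3t1w.nii.gz']
wf.base_dir = '/tmp/pet_preproc_work'
wf.run()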