Example #1
0
    def fix_training_pipeline(self, **name_maps):
        """Build the pipeline that assembles the FIX training set.

        Fix directories and hand-labelled noise files from every BOLD
        sub-study are merged per session, joined over visits and then
        subjects, prepared, and fed to FSL FIX in training mode.
        """
        pipeline = self.new_pipeline(
            name='training_fix',
            desc=("Pipeline to create the training set for FIX given a group "
                  "of subjects with the hand_label_noise.txt file within "
                  "their fix_dir."),
            citations=[fsl_cite],
            name_maps=name_maps)

        substudies = self.bold_substudies()
        n_epi = len(substudies)

        # One merge node per input kind, with one port per BOLD sub-study
        dir_merger = pipeline.add('merge_fix_dirs', NiPypeMerge(n_epi))
        label_merger = pipeline.add('merge_label_files', NiPypeMerge(n_epi))
        for port, substudy_name in enumerate(substudies, start=1):
            spec = self.substudy_spec(substudy_name)
            port_name = 'in{}'.format(port)
            pipeline.connect_input(spec.inverse_map('fix_dir'),
                                   dir_merger, port_name, directory_format)
            pipeline.connect_input(spec.inverse_map('hand_label_noise'),
                                   label_merger, port_name, text_format)

        # Join the merged lists over visits first ...
        visit_join = pipeline.add(
            IdentityInterface(['list_dir', 'list_label_files']),
            inputs={
                'list_dir': (dir_merger, 'out'),
                'list_label_files': (label_merger, 'out')
            },
            joinsource=self.SUBJECT_ID,
            joinfield=['list_dir', 'list_label_files'],
            name='merge_visits')

        # ... then over subjects, ravelling the nested lists flat
        subject_join = pipeline.add(
            'merge_subjects',
            NiPypeMerge(2, ravel_inputs=True),
            inputs={
                'in1': (visit_join, 'list_dir'),
                'in2': (visit_join, 'list_label_files')
            },
            joinsource=self.SUBJECT_ID,
            joinfield=['in1', 'in2'])

        training_prep = pipeline.add(
            'prepare_training',
            PrepareFIXTraining(epi_number=n_epi),
            inputs={'inputs_list': (subject_join, 'out')})

        # Long-running FIX training step produces the training-set output
        pipeline.add('fix_training',
                     FSLFixTraining(outname='FIX_training_set', training=True),
                     inputs={'list_dir': (training_prep, 'prepared_dirs')},
                     outputs={'train_data': ('training_set', rfile_format)},
                     wall_time=240,
                     requirements=[fix_req.v('1.0')])

        return pipeline
Example #2
0
    def gather_fmri_result_pipeline(self, **name_maps):
        """Collect the smoothed time-series of every BOLD sub-study
        into a single results directory."""
        pipeline = self.new_pipeline(
            name='gather_fmri',
            desc=("Pipeline to gather together all the pre-processed "
                  "fMRI images"),
            version=1,
            citations=[fsl_cite],
            name_maps=name_maps)

        substudies = self.bold_substudies()
        # One merge port per BOLD sub-study's smoothed time-series
        collector = pipeline.add('merge_inputs', NiPypeMerge(len(substudies)))
        for port, substudy_name in enumerate(substudies, start=1):
            spec = self.substudy_spec(substudy_name)
            pipeline.connect_input(spec.inverse_map('smoothed_ts'),
                                   collector, 'in{}'.format(port),
                                   nifti_gz_format)

        # NOTE(review): the output name below is misspelt ("processeing") but
        # must match the data-spec key declared elsewhere — do not "fix" here
        pipeline.add('copy2dir',
                     CopyToDir(),
                     inputs={'in_files': (collector, 'out')},
                     outputs={
                         'fmri_pre-processeing_results':
                         ('out_dir', directory_format)
                     })

        return pipeline
Example #3
0
    def timeseries_normalization_to_atlas_pipeline(self, **name_maps):
        """Warp the cleaned fMRI time-series into MNI 2mm space by
        composing the template and coregistration transforms with ANTs."""
        pipeline = self.new_pipeline(
            name='timeseries_normalization_to_atlas_pipeline',
            desc=("Apply ANTs transformation to the fmri filtered file to "
                  "normalize it to MNI 2mm."),
            citations=[fsl_cite],
            name_maps=name_maps)

        # Stack the three transforms in the order ANTs applies them
        transform_stack = pipeline.add(
            'merge_transforms',
            NiPypeMerge(3),
            inputs={
                'in1': ('coreg_to_tmpl_ants_warp', nifti_gz_format),
                'in2': ('coreg_to_tmpl_ants_mat', text_matrix_format),
                'in3': ('coreg_matrix', text_matrix_format)
            },
            wall_time=1)

        # input_image_type=3 flags a time-series (4D) input to ANTs
        pipeline.add(
            'ApplyTransform',
            ApplyTransforms(interpolation='Linear', input_image_type=3),
            inputs={
                'reference_image': ('template_brain', nifti_gz_format),
                'input_image': ('cleaned_file', nifti_gz_format),
                'transforms': (transform_stack, 'out')
            },
            outputs={'normalized_ts': ('output_image', nifti_gz_format)},
            wall_time=7,
            mem_gb=24,
            requirements=[ants_req.v('2')])

        return pipeline
Example #4
0
    def timeseries_normalization_to_atlas_pipeline(self, **kwargs):
        """Warp the cleaned fMRI time-series to MNI 2mm space with ANTs,
        composing the atlas warp, atlas affine and coregistration matrix."""
        pipeline = self.create_pipeline(
            name='timeseries_normalization_to_atlas_pipeline',
            inputs=[
                DatasetSpec('cleaned_file', nifti_gz_format),
                DatasetSpec('coreg_to_atlas_warp', nifti_gz_format),
                DatasetSpec('coreg_to_atlas_mat', text_matrix_format),
                DatasetSpec('coreg_matrix', text_matrix_format)
            ],
            outputs=[DatasetSpec('normalized_ts', nifti_gz_format)],
            desc=("Apply ANTs transformation to the fmri filtered file to "
                  "normalize it to MNI 2mm."),
            version=1,
            citations=[fsl_cite],
            **kwargs)

        # Stack the three transforms in the order ANTs applies them
        transform_merge = pipeline.create_node(NiPypeMerge(3),
                                               name='merge_transforms',
                                               wall_time=1)
        for port, spec_name in enumerate(
                ('coreg_to_atlas_warp', 'coreg_to_atlas_mat', 'coreg_matrix'),
                start=1):
            pipeline.connect_input(spec_name, transform_merge,
                                   'in{}'.format(port))

        warp_ts = pipeline.create_node(ApplyTransforms(),
                                       name='ApplyTransform',
                                       wall_time=7,
                                       memory=24000,
                                       requirements=[ants2_req])
        # The reference is a study parameter rather than a pipeline input
        warp_ts.inputs.reference_image = self.parameter('MNI_template')
        warp_ts.inputs.interpolation = 'Linear'
        # input_image_type=3 flags a time-series (4D) input to ANTs
        warp_ts.inputs.input_image_type = 3
        pipeline.connect(transform_merge, 'out', warp_ts, 'transforms')
        pipeline.connect_input('cleaned_file', warp_ts, 'input_image')
        pipeline.connect_output('normalized_ts', warp_ts, 'output_image')

        return pipeline
Example #5
0
    def fix_training_pipeline(self, **kwargs):
        """Build the pipeline that assembles the FIX training set from
        every sub-study providing a fix_dir and hand-labelled noise file."""
        pipeline_inputs = []
        usable_substudies = []
        for substudy in self.sub_study_specs():
            try:
                fix_spec = self.data_spec(substudy.inverse_map('fix_dir'))
                fix_spec._format = directory_format
                pipeline_inputs.append(fix_spec)
                pipeline_inputs.append(
                    self.data_spec(substudy.inverse_map('hand_label_noise')))
                usable_substudies.append(substudy.name)
            except ArcanaNameError:
                continue  # Sub study doesn't have fix dir

        pipeline = self.create_pipeline(
            name='training_fix',
            inputs=pipeline_inputs,
            outputs=[DatasetSpec('train_data', rfile_format)],
            desc=("Pipeline to create the training set for FIX given a group "
                  "of subjects with the hand_label_noise.txt file within "
                  "their fix_dir."),
            version=1,
            citations=[fsl_cite],
            **kwargs)

        n_epi = len(usable_substudies)
        # One merge node per input kind, with one port per usable sub-study
        dir_merger = pipeline.create_node(NiPypeMerge(n_epi),
                                          name='merge_fix_dirs')
        label_merger = pipeline.create_node(NiPypeMerge(n_epi),
                                            name='merge_label_files')
        for port, substudy_name in enumerate(usable_substudies, start=1):
            spec = self.sub_study_spec(substudy_name)
            pipeline.connect_input(spec.inverse_map('fix_dir'),
                                   dir_merger, 'in{}'.format(port))
            pipeline.connect_input(spec.inverse_map('hand_label_noise'),
                                   label_merger, 'in{}'.format(port))

        # Join the merged lists over visits, then over subjects (ravelled)
        visit_join = pipeline.create_join_visits_node(
            IdentityInterface(['list_dir', 'list_label_files']),
            joinfield=['list_dir', 'list_label_files'],
            name='merge_visits')
        subject_join = pipeline.create_join_subjects_node(
            NiPypeMerge(2), joinfield=['in1', 'in2'], name='merge_subjects')
        subject_join.inputs.ravel_inputs = True

        training_prep = pipeline.create_node(PrepareFIXTraining(),
                                             name='prepare_training')
        training_prep.inputs.epi_number = n_epi
        # Wire: per-kind merges -> visit join -> subject join -> preparation
        pipeline.connect(dir_merger, 'out', visit_join, 'list_dir')
        pipeline.connect(visit_join, 'list_dir', subject_join, 'in1')
        pipeline.connect(label_merger, 'out', visit_join, 'list_label_files')
        pipeline.connect(visit_join, 'list_label_files', subject_join, 'in2')
        pipeline.connect(subject_join, 'out', training_prep, 'inputs_list')

        # Long-running FIX training step produces the training-set output
        trainer = pipeline.create_node(FSLFixTraining(),
                                       name='fix_training',
                                       wall_time=240,
                                       requirements=[fix_req])
        trainer.inputs.outname = 'FIX_training_set'
        trainer.inputs.training = True
        pipeline.connect(training_prep, 'prepared_dirs', trainer, 'list_dir')
        pipeline.connect_output('train_data', trainer, 'training_set')

        return pipeline