Example #1
0
class DummyStudy(with_metaclass(StudyMetaClass, Study)):
    """Minimal study used for testing: four optional text inputs and a
    collection of derived specs that all point at the same no-op
    'dummy_pipeline'."""

    add_data_specs = (
        # Acquired inputs -- all optional text files.
        [InputFilesetSpec('source{}'.format(i), text_format, optional=True)
         for i in (1, 2, 3, 4)] +
        # Per-session derived sinks.
        [FilesetSpec(name, text_format, 'dummy_pipeline')
         for name in ('sink1', 'sink3', 'sink4')] +
        # One sink per frequency level.
        [FilesetSpec('subject_sink', text_format, 'dummy_pipeline',
                     frequency='per_subject'),
         FilesetSpec('visit_sink', text_format, 'dummy_pipeline',
                     frequency='per_visit'),
         FilesetSpec('study_sink', text_format, 'dummy_pipeline',
                     frequency='per_study')] +
        # Sinks used to test re-derivation.
        [FilesetSpec(name, text_format, 'dummy_pipeline')
         for name in ('resink1', 'resink2', 'resink3')] +
        # One field spec per basic type.
        [FieldSpec('field1', int, 'dummy_pipeline'),
         FieldSpec('field2', float, 'dummy_pipeline'),
         FieldSpec('field3', str, 'dummy_pipeline')])

    def dummy_pipeline(self, **name_maps):
        """Return an empty pipeline so the derived specs above have a
        valid generating pipeline to reference."""
        return self.new_pipeline('dummy', name_maps=name_maps)
Example #2
0
class DummyAnalysis(Analysis, metaclass=AnalysisMetaClass):
    """Minimal analysis used for testing: one mandatory and three
    optional text inputs, plus derived specs that all reference the
    same no-op 'dummy_pipeline'."""

    add_data_specs = (
        # Acquired inputs: 'source1' is required, the rest optional.
        [InputFilesetSpec('source1', text_format)] +
        [InputFilesetSpec('source{}'.format(i), text_format, optional=True)
         for i in (2, 3, 4)] +
        # Per-session derived sinks.
        [FilesetSpec(name, text_format, 'dummy_pipeline')
         for name in ('sink1', 'sink3', 'sink4')] +
        # One sink per frequency level.
        [FilesetSpec('subject_sink', text_format, 'dummy_pipeline',
                     frequency='per_subject'),
         FilesetSpec('visit_sink', text_format, 'dummy_pipeline',
                     frequency='per_visit'),
         FilesetSpec('analysis_sink', text_format, 'dummy_pipeline',
                     frequency='per_dataset')] +
        # Sinks used to test re-derivation.
        [FilesetSpec(name, text_format, 'dummy_pipeline')
         for name in ('resink1', 'resink2', 'resink3')] +
        # One field spec per basic type.
        [FieldSpec('field1', int, 'dummy_pipeline'),
         FieldSpec('field2', float, 'dummy_pipeline'),
         FieldSpec('field3', str, 'dummy_pipeline')])

    def dummy_pipeline(self, **name_maps):
        """Return an empty pipeline for the specs above to reference."""
        return self.new_pipeline('dummy_pipeline', name_maps=name_maps)
Example #3
0
 def pipeline2(self):
     """Pipeline exercising requirement loading on a map node,
     including uninstalled alternatives that fall back to an
     installed version."""
     pipeline = self.pipeline(
         name='pipeline2',
         inputs=[FilesetSpec('ones', text_format),
                 FilesetSpec('twos', text_format)],
         outputs=[FieldSpec('threes', float),
                  FieldSpec('fours', float)],
         desc=("A pipeline that tests loading of requirements in "
               "map nodes"),
         references=[],
     )
     # Collect the two inputs into one list for the map node.
     merger = pipeline.create_node(Merge(2), "merge")
     pipeline.connect_input('ones', merger, 'in1')
     pipeline.connect_input('twos', merger, 'in2')
     # Map node; first requirement is a tuple of alternatives (two
     # uninstalled plus one installed).
     adder = pipeline.create_map_node(
         TestMathWithReq(), "maths", iterfield='x',
         requirements=[(notinstalled1_req, notinstalled2_req, first_req),
                       second_req])
     adder.inputs.op = 'add'
     adder.inputs.y = 2
     pipeline.connect(merger, 'out', adder, 'x')
     # Split the mapped results back into the two scalar outputs.
     splitter = pipeline.create_node(Split(), 'split')
     splitter.inputs.splits = [1, 1]
     splitter.inputs.squeeze = True
     pipeline.connect(adder, 'z', splitter, 'inlist')
     pipeline.connect_output('threes', splitter, 'out1')
     pipeline.connect_output('fours', splitter, 'out2')
     return pipeline
Example #4
0
class DummyStudy(with_metaclass(StudyMetaClass, Study)):
    """Minimal study used for testing: one mandatory and three optional
    acquired text inputs, plus derived specs that all name the same
    placeholder 'dummy_pipeline'."""

    add_data_specs = (
        # Acquired inputs: 'source1' is required, the rest optional.
        [AcquiredFilesetSpec('source1', text_format)] +
        [AcquiredFilesetSpec('source{}'.format(i), text_format,
                             optional=True)
         for i in (2, 3, 4)] +
        # Per-session derived sinks.
        [FilesetSpec(name, text_format, 'dummy_pipeline')
         for name in ('sink1', 'sink3', 'sink4')] +
        # One sink per frequency level.
        [FilesetSpec('subject_sink', text_format, 'dummy_pipeline',
                     frequency='per_subject'),
         FilesetSpec('visit_sink', text_format, 'dummy_pipeline',
                     frequency='per_visit'),
         FilesetSpec('project_sink', text_format, 'dummy_pipeline',
                     frequency='per_study')] +
        # Sinks used to test re-derivation.
        [FilesetSpec(name, text_format, 'dummy_pipeline')
         for name in ('resink1', 'resink2', 'resink3')] +
        # One field spec per basic type.
        [FieldSpec('field1', int, 'dummy_pipeline'),
         FieldSpec('field2', float, 'dummy_pipeline'),
         FieldSpec('field3', str, 'dummy_pipeline')])

    def dummy_pipeline(self):
        """No-op placeholder referenced by the specs above."""
        pass
Example #5
0
class RequirementsAnalysis(with_metaclass(AnalysisMetaClass, Analysis)):
    """Analysis used to test that versioned software requirements are
    loaded for both plain nodes (pipeline1) and map nodes (pipeline2)."""

    add_data_specs = [
        InputFilesetSpec('ones', text_format),
        FilesetSpec('twos', text_format, 'pipeline1'),
        FieldSpec('threes', float, 'pipeline2'),
        FieldSpec('fours', float, 'pipeline2')]

    def pipeline1(self, **name_maps):
        """Single-node pipeline with versioned requirements."""
        pipeline = self.new_pipeline(
            name='pipeline1',
            desc=("A pipeline that tests loading of requirements"),
            name_maps=name_maps)
        # One maths node that adds 1 to the input and writes a file.
        adder = pipeline.add(
            "maths",
            TestMathWithReq(),
            requirements=[first_req.v('0.15.9'),
                          second_req.v('1.0.2')])
        adder.inputs.op = 'add'
        adder.inputs.as_file = True
        adder.inputs.y = 1
        pipeline.connect_input('ones', adder, 'x', text_format)
        pipeline.connect_output('twos', adder, 'z', text_format)
        return pipeline

    def pipeline2(self, **name_maps):
        """Map-node pipeline with versioned requirements."""
        pipeline = self.new_pipeline(
            name='pipeline2',
            desc=("A pipeline that tests loading of requirements in "
                  "map nodes"),
            name_maps=name_maps)
        # Collect the two inputs into one list for the map node.
        merger = pipeline.add("merge", Merge(2))
        pipeline.connect_input('ones', merger, 'in1', text_format)
        pipeline.connect_input('twos', merger, 'in2', text_format)
        # Map node adding 2 to each element, with the same requirements.
        adder = pipeline.add(
            "maths",
            TestMathWithReq(),
            iterfield='x',
            requirements=[first_req.v('0.15.9'),
                          second_req.v('1.0.2')])
        adder.inputs.op = 'add'
        adder.inputs.y = 2
        pipeline.connect(merger, 'out', adder, 'x')
        # Split the mapped results into the two scalar outputs.
        splitter = pipeline.add('split', Split())
        splitter.inputs.splits = [1, 1]
        splitter.inputs.squeeze = True
        pipeline.connect(adder, 'z', splitter, 'inlist')
        pipeline.connect_output('threes', splitter, 'out1', text_format)
        pipeline.connect_output('fours', splitter, 'out2', text_format)
        return pipeline
Example #6
0
 def test_fileset_and_field(self):
     """Fileset and field specs should survive a pickle round-trip
     unchanged (checked via their equality operator)."""
     specs = [FilesetSpec('a', text_format,
                          'dummy_pipeline1'),
              FieldSpec('b', int, 'dummy_pipeline2')]
     for index, spec in enumerate(specs):
         pkl_path = op.join(self.pkl_dir, '{}.pkl'.format(index))
         with open(pkl_path, 'wb') as fh:
             pkl.dump(spec, fh)
         with open(pkl_path, 'rb') as fh:
             reloaded = pkl.load(fh)
         self.assertEqual(spec, reloaded)
Example #7
0
File: epi.py Project: amrka/banana
    def field_map_time_info_pipeline(self, **kwargs):
        """Extract the field-map delta TE value from the magnitude
        image's DICOM data."""
        pipeline = self.create_pipeline(
            name='field_map_time_info_pipeline',
            inputs=[DatasetSpec('field_map_mag', dicom_format)],
            outputs=[FieldSpec('field_map_delta_te', float)],
            desc=("Pipeline to extract delta TE from field map "
                  "images, if provided"),
            version=1,
            citations=[fsl_cite],
            **kwargs)

        # Single node: takes the magnitude image and emits delta TE.
        extract_node = pipeline.create_node(FieldMapTimeInfo(),
                                            name='extract_delta_te')
        pipeline.connect_input('field_map_mag', extract_node, 'fm_mag')
        pipeline.connect_output('field_map_delta_te', extract_node,
                                'delta_te')

        return pipeline
Example #8
0
class MotionDetectionMixin(MultiStudy, metaclass=MultiStudyMetaClass):

    #     add_substudy_specs = [
    #         SubStudySpec('pet_mc', PetStudy)]

    add_data_specs = [
        # --- Acquired inputs (all optional; presence of e.g.
        #     'pet_data_dir' or 'struct2align' switches on optional
        #     branches in the pipelines below). ---
        InputFilesetSpec('pet_data_dir', directory_format, optional=True),
        InputFilesetSpec('pet_data_reconstructed',
                         directory_format,
                         optional=True),
        InputFilesetSpec('struct2align', nifti_gz_format, optional=True),
        InputFilesetSpec('umap', dicom_format, optional=True),
        # --- Derived specs: (name, format, generating pipeline). ---
        FilesetSpec('pet_data_prepared', directory_format,
                    'prepare_pet_pipeline'),
        FilesetSpec('static_motion_correction_results', directory_format,
                    'motion_correction_pipeline'),
        FilesetSpec('dynamic_motion_correction_results', directory_format,
                    'motion_correction_pipeline'),
        # Motion summary measures produced by the displacement pipeline.
        FilesetSpec('mean_displacement', text_format,
                    'mean_displacement_pipeline'),
        FilesetSpec('mean_displacement_rc', text_format,
                    'mean_displacement_pipeline'),
        FilesetSpec('mean_displacement_consecutive', text_format,
                    'mean_displacement_pipeline'),
        FilesetSpec('mats4average', text_format, 'mean_displacement_pipeline'),
        FilesetSpec('start_times', text_format, 'mean_displacement_pipeline'),
        FilesetSpec('motion_par_rc', text_format,
                    'mean_displacement_pipeline'),
        FilesetSpec('motion_par', text_format, 'mean_displacement_pipeline'),
        FilesetSpec('offset_indexes', text_format,
                    'mean_displacement_pipeline'),
        FilesetSpec('severe_motion_detection_report', text_format,
                    'mean_displacement_pipeline'),
        # Frame boundaries detected from the motion time-course.
        FilesetSpec('frame_start_times', text_format,
                    'motion_framing_pipeline'),
        FilesetSpec('frame_vol_numbers', text_format,
                    'motion_framing_pipeline'),
        FilesetSpec('timestamps', directory_format, 'motion_framing_pipeline'),
        # Diagnostic plots (PNG).
        FilesetSpec('mean_displacement_plot', png_format,
                    'plot_mean_displacement_pipeline'),
        FilesetSpec('rotation_plot', png_format,
                    'plot_mean_displacement_pipeline'),
        FilesetSpec('translation_plot', png_format,
                    'plot_mean_displacement_pipeline'),
        # Per-frame averaged transformation matrices and PET correction
        # factors consumed by motion_correction_pipeline.
        FilesetSpec('average_mats', directory_format,
                    'frame_mean_transformation_mats_pipeline'),
        FilesetSpec('correction_factors', text_format,
                    'pet_correction_factors_pipeline'),
        # umap realignment and its DICOM conversion products.
        FilesetSpec('umaps_align2ref', directory_format,
                    'umap_realignment_pipeline'),
        FilesetSpec('umap_aligned_dicoms', directory_format,
                    'nifti2dcm_conversion_pipeline'),
        FilesetSpec('motion_detection_output', directory_format,
                    'gather_outputs_pipeline'),
        FilesetSpec('moco_series', directory_format,
                    'create_moco_series_pipeline'),
        FilesetSpec('fixed_binning_mats', directory_format,
                    'fixed_binning_pipeline'),
        # PET header fields (pipeline is delegated to the 'pet_mc'
        # sub-study via MultiStudy.translate below).
        FieldSpec('pet_duration', int, 'pet_header_extraction_pipeline'),
        FieldSpec('pet_end_time', str, 'pet_header_extraction_pipeline'),
        FieldSpec('pet_start_time', str, 'pet_header_extraction_pipeline')
    ]

    add_param_specs = [
        # Motion-framing parameters consumed by MotionFraming
        # ('framing_th' is the movement threshold, default 2mm).
        ParamSpec('framing_th', 2.0),
        ParamSpec('framing_temporal_th', 30.0),
        ParamSpec('framing_duration', 0),
        ParamSpec('md_framing', True),
        ParamSpec('align_pct', False),
        ParamSpec('align_fixed_binning', False),
        # Template files used by create_moco_series and MNI registration.
        ParamSpec('moco_template',
                  os.path.join(reference_path, 'moco_template.IMA')),
        ParamSpec('PET_template_MNI',
                  os.path.join(template_path, 'PET_template_MNI.nii.gz')),
        # Fixed-binning parameters (see fixed_binning_pipeline).
        ParamSpec('fixed_binning_n_frames', 0),
        ParamSpec('pet_offset', 0),
        ParamSpec('fixed_binning_bin_len', 60),
        # PET field-of-view cropping box (min/size per axis), used by
        # PETFovCropping in motion_correction_pipeline.
        ParamSpec('crop_xmin', 100),
        ParamSpec('crop_xsize', 130),
        ParamSpec('crop_ymin', 100),
        ParamSpec('crop_ysize', 130),
        ParamSpec('crop_zmin', 20),
        ParamSpec('crop_zsize', 100),
        # Workflow switches: register results to MNI space; use the
        # dynamic (per-bin) rather than static motion correction branch.
        ParamSpec('PET2MNI_reg', False),
        ParamSpec('dynamic_pet_mc', False)
    ]

    def mean_displacement_pipeline(self, **name_maps):
        """Build the mean-displacement calculation pipeline.

        For every sub-study that maps a 'motion_mats' spec, the motion
        matrices plus their TR, start time and real duration are
        collected, merged into lists and fed to
        MeanDisplacementCalculation, which produces the displacement
        summaries declared in add_data_specs (mean_displacement*,
        motion_par*, start_times, offset_indexes, mats4average and the
        severe-motion report).
        """

        pipeline = self.new_pipeline(
            name='mean_displacement_calculation',
            desc=("Calculate the mean displacement between each motion"
                  " matrix and a reference."),
            citations=[fsl_cite],
            name_maps=name_maps)

        # Per-sub-study inputs keyed by Merge port name ('in1', 'in2', ...).
        motion_mats_in = {}
        tr_in = {}
        start_time_in = {}
        real_duration_in = {}
        merge_index = 1
        input_names = []
        for spec in self.substudy_specs():
            # EAFP: only sub-studies whose spec map includes
            # 'motion_mats' contribute to the calculation.
            try:
                spec.map('motion_mats')
            except ArcanaNameError:
                pass  # Sub study doesn't have motion mats spec
            else:
                k = 'in{}'.format(merge_index)
                motion_mats_in[k] = (spec.map('motion_mats'),
                                     motion_mats_format)
                tr_in[k] = (spec.map('tr'), float)
                start_time_in[k] = (spec.map('start_time'), float)
                real_duration_in[k] = (spec.map('real_duration'), float)
                # Pattern of the sub-study's primary scan, passed to the
                # calculation node as its 'input_names'.
                input_names.append(
                    self.spec(spec.map(
                        spec.study_class.primary_scan_name)).pattern)
                merge_index += 1

        # Merge each per-sub-study collection into a single list input.
        merge_motion_mats = pipeline.add('merge_motion_mats',
                                         Merge(len(motion_mats_in)),
                                         inputs=motion_mats_in)

        merge_tr = pipeline.add('merge_tr', Merge(len(tr_in)), inputs=tr_in)

        merge_start_time = pipeline.add('merge_start_time',
                                        Merge(len(start_time_in)),
                                        inputs=start_time_in)

        merge_real_duration = pipeline.add('merge_real_duration',
                                           Merge(len(real_duration_in)),
                                           inputs=real_duration_in)

        # The calculation node; 'reference' comes from the study's
        # 'ref_brain' spec (defined outside this visible chunk).
        pipeline.add(
            'scan_time_info',
            MeanDisplacementCalculation(input_names=input_names),
            inputs={
                'motion_mats': (merge_motion_mats, 'out'),
                'trs': (merge_tr, 'out'),
                'start_times': (merge_start_time, 'out'),
                'real_durations': (merge_real_duration, 'out'),
                'reference': ('ref_brain', nifti_gz_format)
            },
            outputs={
                'mean_displacement': ('mean_displacement', text_format),
                'mean_displacement_rc': ('mean_displacement_rc', text_format),
                'mean_displacement_consecutive':
                ('mean_displacement_consecutive', text_format),
                'start_times': ('start_times', text_format),
                'motion_par_rc': ('motion_parameters_rc', text_format),
                'motion_par': ('motion_parameters', text_format),
                'offset_indexes': ('offset_indexes', text_format),
                'mats4average': ('mats4average', text_format),
                'severe_motion_detection_report':
                ('corrupted_volumes', text_format)
            })

        return pipeline

    def motion_framing_pipeline(self, **name_maps):
        """Detect frames whose head movement exceeds the configured
        thresholds and emit frame start times, volume numbers and
        timestamps."""
        pipeline = self.new_pipeline(
            name='motion_framing',
            desc=("Calculate when the head movement exceeded a "
                  "predefined threshold (default 2mm)."),
            citations=[fsl_cite],
            name_maps=name_maps)

        framing_node = pipeline.add(
            'motion_framing',
            MotionFraming(
                motion_threshold=self.parameter('framing_th'),
                temporal_threshold=self.parameter('framing_temporal_th'),
                pet_offset=self.parameter('pet_offset'),
                pet_duration=self.parameter('framing_duration')),
            inputs={
                'start_times': ('start_times', text_format),
                'mean_displacement': ('mean_displacement', text_format),
                'mean_displacement_consec':
                ('mean_displacement_consecutive', text_format)},
            outputs={
                'timestamps': ('timestamps_dir', directory_format),
                'frame_start_times': ('frame_start_times', text_format),
                'frame_vol_numbers': ('frame_vol_numbers', text_format)})

        # PET timing info is only wired in when raw PET data was supplied.
        if 'pet_data_dir' in self.input_names:
            pipeline.connect_input('pet_start_time', framing_node,
                                   'pet_start_time')
            pipeline.connect_input('pet_end_time', framing_node,
                                   'pet_end_time')

        return pipeline

    def plot_mean_displacement_pipeline(self, **name_maps):
        """Produce the displacement, rotation and translation plots."""
        pipeline = self.new_pipeline(
            name='plot_mean_displacement',
            desc=("Plot the mean displacement real clock"),
            citations=[fsl_cite],
            name_maps=name_maps)

        # Single plotting node: reads the motion summaries, emits PNGs.
        pipeline.add(
            'plot_md',
            PlotMeanDisplacementRC(framing=self.parameter('md_framing')),
            inputs={
                'motion_par_rc': ('motion_par_rc', text_format),
                'frame_start_times': ('frame_start_times', text_format),
                'false_indexes': ('offset_indexes', text_format),
                'mean_disp_rc': ('mean_displacement_rc', text_format)},
            outputs={
                'translation_plot': ('trans_plot', png_format),
                'rotation_plot': ('rot_plot', png_format),
                'mean_displacement_plot': ('mean_disp_plot', png_format)})

        return pipeline

    def frame_mean_transformation_mats_pipeline(self, **name_maps):
        """Average the affine motion matrices belonging to each frame."""
        pipeline = self.new_pipeline(
            name='frame_mean_transformation_mats',
            desc=("Average all the transformation mats within each "
                  "detected frame."),
            citations=[fsl_cite],
            name_maps=name_maps)

        averager = AffineMatAveraging()
        pipeline.add('mats_averaging', averager,
                     inputs={
                         'all_mats4average': ('mats4average', text_format),
                         'frame_vol_numbers':
                         ('frame_vol_numbers', text_format)},
                     outputs={'average_mats':
                              ('average_mats', directory_format)})

        return pipeline

    def fixed_binning_pipeline(self, **name_maps):
        """Average motion matrices over fixed-length time bins for
        dynamic PET motion correction."""
        pipeline = self.new_pipeline(
            name='fixed_binning',
            desc=("Pipeline to generate average motion matrices for "
                  "each bin in a dynamic PET reconstruction experiment."
                  "This will be the input for the dynamic motion correction."),
            citations=[fsl_cite],
            name_maps=name_maps)

        # Bin geometry comes from the study parameters.
        binning = FixedBinning(
            n_frames=self.parameter('fixed_binning_n_frames'),
            pet_offset=self.parameter('pet_offset'),
            bin_len=self.parameter('fixed_binning_bin_len'))
        pipeline.add(
            'fixed_binning',
            binning,
            inputs={
                'motion_mats': ('mats4average', text_format),
                'start_times': ('start_times', text_format),
                'pet_start_time': ('pet_start_time', str),
                'pet_duration': ('pet_duration', int)},
            outputs={
                'fixed_binning_mats': ('average_bin_mats', directory_format)})

        return pipeline

    def pet_correction_factors_pipeline(self, **name_maps):
        """Derive per-frame correction factors from frame timestamps."""
        pipeline = self.new_pipeline(
            name='pet_correction_factors',
            desc=("Pipeline to calculate the correction factors to "
                  "account for frame duration when averaging the PET "
                  "frames to create the static PET image"),
            citations=[fsl_cite],
            name_maps=name_maps)

        factors_node = PetCorrectionFactor()
        pipeline.add('pet_corr_factors', factors_node,
                     inputs={'timestamps':
                             ('timestamps', directory_format)},
                     outputs={'correction_factors':
                              ('corr_factors', text_format)})

        return pipeline

    def nifti2dcm_conversion_pipeline(self, **name_maps):
        """Convert the realigned umap NIfTI volumes back to DICOM.

        Lists and reorients the aligned umap NIfTIs, lists the original
        umap DICOMs for use as conversion references, converts each
        reoriented NIfTI to DICOM (map node over 'in_file') and copies
        the results into the 'umap_aligned_dicoms' output directory.
        """
        pipeline = self.new_pipeline(
            name='conversion_to_dicom',
            desc=("Conversing aligned umap from nifti to dicom format - "
                  "parallel implementation"),
            citations=(),
            name_maps=name_maps)

        list_niftis = pipeline.add(
            'list_niftis',
            ListDir(),
            inputs={'directory': ('umaps_align2ref', directory_format)})

        reorient_niftis = pipeline.add('reorient_niftis',
                                       ReorientUmap(),
                                       inputs={
                                           'niftis': (list_niftis, 'files'),
                                           'umap': ('umap', dicom_format)
                                       },
                                       requirements=[mrtrix_req.v('3.0rc3')])

        list_dicoms = pipeline.add(
            'list_dicoms',
            ListDir(sort_key=dicom_fname_sort_key),
            inputs={'directory': ('umap', dicom_format)})

        nii2dicom = pipeline.add(
            'nii2dicom',
            Nii2Dicom(
                # extension='Frame',  #  nii2dicom parameter
            ),
            # FIX: 'in_file' is an *input* of Nii2Dicom (fed from the
            # reoriented umaps and iterated over below); it was
            # previously mis-placed in the `outputs` dict, where keys
            # are expected to be output spec names.
            inputs={'reference_dicom': (list_dicoms, 'files'),
                    'in_file': (reorient_niftis, 'reoriented_umaps')},
            iterfield=['in_file'],
            wall_time=20)

        pipeline.add(
            'copy2dir',
            CopyToDir(extension='Frame'),
            inputs={'in_files': (nii2dicom, 'out_file')},
            outputs={'umap_aligned_dicoms': ('out_dir', directory_format)})

        return pipeline

    def umap_realignment_pipeline(self, **name_maps):
        """Align the supplied umap to the head position of each frame."""
        pipeline = self.new_pipeline(
            name='umap_realignment',
            desc=("Pipeline to align the original umap (if provided)"
                  "to match the head position in each frame and improve the "
                  "static PET image quality."),
            citations=[fsl_cite],
            name_maps=name_maps)

        aligner = UmapAlign2Reference(pct=self.parameter('align_pct'))
        pipeline.add(
            'umap2ref_alignment',
            aligner,
            inputs={
                'umap': ('umap', nifti_gz_format),
                'average_mats': ('average_mats', directory_format),
                'ute_qform_mat': ('umap_ref_qform_mat', text_matrix_format),
                'ute_regmat': ('umap_ref_coreg_matrix', text_matrix_format)},
            outputs={'umaps_align2ref':
                     ('umaps_align2ref', directory_format)},
            requirements=[fsl_req.v('5.0.9')])

        return pipeline

    def create_moco_series_pipeline(self, **name_maps):
        """Build a moco_series from start times and motion parameters.

        NOTE: probably wrong -- it is still unknown how to import the
        new moco series back into the scanner; kept as a first attempt.
        """
        pipeline = self.new_pipeline(
            name='create_moco_series',
            desc=("Pipeline to generate a moco_series that can be then "
                  "imported back in the scanner and used to correct the"
                  " pet data"),
            citations=[fsl_cite],
            name_maps=name_maps)

        pipeline.add(
            'create_moco_series',
            CreateMocoSeries(
                moco_template=self.parameter('moco_template')),
            inputs={
                'motion_par': ('motion_par', text_format),
                'start_times': ('start_times', text_format)},
            outputs={'moco_series': ('modified_moco', directory_format)})

        return pipeline

    def gather_outputs_pipeline(self, **name_maps):
        """Collect the motion-detection results into one directory."""
        pipeline = self.new_pipeline(
            name='gather_motion_detection_outputs',
            desc=("Pipeline to gather together all the outputs from "
                  "the motion detection pipeline."),
            citations=[fsl_cite],
            name_maps=name_maps)

        # Fan the five result files into a single list...
        collected = pipeline.add(
            'merge_inputs',
            Merge(5),
            inputs={
                'in1': ('mean_displacement_plot', png_format),
                'in2': ('motion_par', text_format),
                'in3': ('correction_factors', text_format),
                'in4': ('severe_motion_detection_report', text_format),
                'in5': ('timestamps', directory_format)})

        # ...then copy them all into the output directory.
        pipeline.add(
            'copy2dir',
            CopyToDir(),
            inputs={'in_files': (collected, 'out')},
            outputs={'motion_detection_output':
                     ('out_dir', directory_format)})

        return pipeline

    # Delegated pipelines: MultiStudy.translate('pet_mc', <name>)
    # presumably re-exposes the named pipeline of the 'pet_mc'
    # sub-study as a pipeline of this multi-study -- TODO confirm
    # against MultiStudy.translate's definition.
    prepare_pet_pipeline = MultiStudy.translate(
        'pet_mc', 'pet_data_preparation_pipeline')

    pet_header_extraction_pipeline = MultiStudy.translate(
        'pet_mc', 'pet_time_info_extraction_pipeline')

    def motion_correction_pipeline(self, **name_maps):

        if 'struct2align' in self.input_names:
            StructAlignment = True
        else:
            StructAlignment = False

        pipeline = self.new_pipeline(
            name='pet_mc',
            desc=("Given a folder with reconstructed PET data, this "
                  "pipeline will generate a motion corrected PET"
                  "image using information extracted from the MR-based "
                  "motion detection pipeline"),
            citations=[fsl_cite],
            name_maps=name_maps)

        check_pet = pipeline.add(
            'check_pet_data',
            CheckPetMCInputs(),
            inputs={
                'pet_data': ('pet_data_prepared', directory_format),
                'reference': ('ref_brain', nifti_gz_format)
            },
            requirements=[fsl_req.v('5.0.9'),
                          mrtrix_req.v('3.0rc3')])
        if self.branch('dynamic_pet_mc'):
            pipeline.connect_input('fixed_binning_mats', check_pet,
                                   'motion_mats')
        else:
            pipeline.connect_input('average_mats', check_pet, 'motion_mats')
            pipeline.connect_input('correction_factors', check_pet,
                                   'corr_factors')

        if StructAlignment:
            struct_reg = pipeline.add('ref2structural_reg',
                                      FLIRT(dof=6,
                                            cost_func='normmi',
                                            cost='normmi',
                                            output_type='NIFTI_GZ'),
                                      inputs={
                                          'reference':
                                          ('ref_brain', nifti_gz_format),
                                          'in_file':
                                          ('struct2align', nifti_gz_format)
                                      },
                                      requirements=[fsl_req.v('5.0.9')])

        if self.branch('dynamic_pet_mc'):
            pet_mc = pipeline.add('pet_mc',
                                  PetImageMotionCorrection(),
                                  inputs={
                                      'pet_image': (check_pet, 'pet_images'),
                                      'motion_mat': (check_pet, 'motion_mats'),
                                      'pet2ref_mat': (check_pet, 'pet2ref_mat')
                                  },
                                  requirements=[fsl_req.v('5.0.9')],
                                  iterfield=['pet_image', 'motion_mat'])
        else:
            pet_mc = pipeline.add(
                'pet_mc',
                PetImageMotionCorrection(),
                inputs={'corr_factor': (check_pet, 'corr_factors')},
                requirements=[fsl_req.v('5.0.9')],
                iterfield=['corr_factor', 'pet_image', 'motion_mat'])

        if StructAlignment:
            pipeline.connect(struct_reg, 'out_matrix_file', pet_mc,
                             'structural2ref_regmat')
            pipeline.connect_input('struct2align', pet_mc, 'structural_image')
        if self.parameter('PET2MNI_reg'):
            mni_reg = True
        else:
            mni_reg = False

        if self.branch('dynamic_pet_mc'):
            merge_mc = pipeline.add(
                'merge_pet_mc',
                fsl.Merge(dimension='t'),
                inputs={'in_files': (pet_mc, 'pet_mc_image')},
                requirements=[fsl_req.v('5.0.9')])

            merge_no_mc = pipeline.add(
                'merge_pet_no_mc',
                fsl.Merge(dimension='t'),
                inputs={'in_files': (pet_mc, 'pet_no_mc_image')},
                requirements=[fsl_req.v('5.0.9')])
        else:
            static_mc = pipeline.add('static_mc_generation',
                                     StaticPETImageGeneration(),
                                     inputs={
                                         'pet_mc_images':
                                         (pet_mc, 'pet_mc_image'),
                                         'pet_no_mc_images':
                                         (pet_mc, 'pet_no_mc_image')
                                     },
                                     requirements=[fsl_req.v('5.0.9')])

        merge_outputs = pipeline.add(
            'merge_outputs',
            Merge(3),
            inputs={'in1': ('mean_displacement_plot', png_format)})

        if not StructAlignment:
            cropping = pipeline.add(
                'pet_cropping',
                PETFovCropping(x_min=self.parameter('crop_xmin'),
                               x_size=self.parameter('crop_xsize'),
                               y_min=self.parameter('crop_ymin'),
                               y_size=self.parameter('crop_ysize'),
                               z_min=self.parameter('crop_zmin'),
                               z_size=self.parameter('crop_zsize')))
            if self.branch('dynamic_pet_mc'):
                pipeline.connect(merge_mc, 'merged_file', cropping,
                                 'pet_image')
            else:
                pipeline.connect(static_mc, 'static_mc', cropping, 'pet_image')

            cropping_no_mc = pipeline.add(
                'pet_no_mc_cropping',
                PETFovCropping(x_min=self.parameter('crop_xmin'),
                               x_size=self.parameter('crop_xsize'),
                               y_min=self.parameter('crop_ymin'),
                               y_size=self.parameter('crop_ysize'),
                               z_min=self.parameter('crop_zmin'),
                               z_size=self.parameter('crop_zsize')))
            if self.branch('dynamic_pet_mc'):
                pipeline.connect(merge_no_mc, 'merged_file', cropping_no_mc,
                                 'pet_image')
            else:
                pipeline.connect(static_mc, 'static_no_mc', cropping_no_mc,
                                 'pet_image')

            if mni_reg:
                if self.branch('dynamic_pet_mc'):
                    t_mean = pipeline.add(
                        'PET_temporal_mean',
                        ImageMaths(op_string='-Tmean'),
                        inputs={'in_file': (cropping, 'pet_cropped')},
                        requirements=[fsl_req.v('5.0.9')])

                reg_tmean2MNI = pipeline.add(
                    'reg2MNI',
                    AntsRegSyn(num_dimensions=3,
                               transformation='s',
                               out_prefix='reg2MNI',
                               num_threads=4,
                               ref_file=self.parameter('PET_template_MNI')),
                    wall_time=25,
                    requirements=[ants_req.v('2')])

                if self.branch('dynamic_pet_mc'):
                    pipeline.connect(t_mean, 'out_file', reg_tmean2MNI,
                                     'input_file')

                    merge_trans = pipeline.add('merge_transforms',
                                               Merge(2),
                                               inputs={
                                                   'in1': (reg_tmean2MNI,
                                                           'warp_file'),
                                                   'in2':
                                                   (reg_tmean2MNI, 'regmat')
                                               },
                                               wall_time=1)

                    apply_trans = pipeline.add(
                        'apply_trans',
                        ApplyTransforms(
                            reference_image=self.parameter('PET_template_MNI'),
                            interpolation='Linear',
                            input_image_type=3),
                        inputs={
                            'input_image': (cropping, 'pet_cropped'),
                            'transforms': (merge_trans, 'out')
                        },
                        wall_time=7,
                        mem_gb=24,
                        requirements=[ants_req.v('2')])
                    pipeline.connect(apply_trans, 'output_image',
                                     merge_outputs, 'in2'),
                else:
                    pipeline.connect(cropping, 'pet_cropped', reg_tmean2MNI,
                                     'input_file')
                    pipeline.connect(reg_tmean2MNI, 'reg_file', merge_outputs,
                                     'in2')
            else:
                pipeline.connect(cropping, 'pet_cropped', merge_outputs, 'in2')
            pipeline.connect(cropping_no_mc, 'pet_cropped', merge_outputs,
                             'in3')
        else:
            if self.branch('dynamic_pet_mc'):
                pipeline.connect(merge_mc, 'merged_file', merge_outputs, 'in2')
                pipeline.connect(merge_no_mc, 'merged_file', merge_outputs,
                                 'in3')
            else:
                pipeline.connect(static_mc, 'static_mc', merge_outputs, 'in2')
                pipeline.connect(static_mc, 'static_no_mc', merge_outputs,
                                 'in3')


#         mcflirt = pipeline.add('mcflirt', MCFLIRT())
#                 'in_file': (merge_mc_ps, 'merged_file'),
#                 cost='normmi',

        copy2dir = pipeline.add('copy2dir',
                                CopyToDir(),
                                inputs={'in_files': (merge_outputs, 'out')})
        if self.branch('dynamic_pet_mc'):
            pipeline.connect_output('dynamic_motion_correction_results',
                                    copy2dir, 'out_dir')
        else:
            pipeline.connect_output('static_motion_correction_results',
                                    copy2dir, 'out_dir')
        return pipeline
# Example #9
# 0
class MriStudy(Study, metaclass=StudyMetaClass):

    # Data specifications for the study.  InputFilesetSpecs are acquired
    # inputs supplied externally; FilesetSpecs/FieldSpecs are derived by the
    # named pipeline-constructor method.
    add_data_specs = [
        # --- Acquired inputs -------------------------------------------
        InputFilesetSpec('magnitude', STD_IMAGE_FORMATS,
                         desc=("Typically the primary scan acquired from "
                               "the scanner for the given contrast")),
        InputFilesetSpec(
            'coreg_ref', STD_IMAGE_FORMATS,
            desc=("A reference scan to coregister the primary scan to. Should "
                  "not be brain extracted"),
            optional=True),
        InputFilesetSpec(
            'coreg_ref_brain', STD_IMAGE_FORMATS,
            desc=("A brain-extracted reference scan to coregister a brain-"
                  "extracted scan to. Note that the output of the "
                  "registration brain_coreg can also be derived by brain "
                  "extracting the output of coregistration performed "
                  "before brain extraction if 'coreg_ref' is provided"),
            optional=True),
        InputFilesetSpec(
            'channels', (multi_nifti_gz_format, zip_format),
            optional=True, desc=("Reconstructed complex image for each "
                                 "coil without standardisation.")),
        InputFilesetSpec('header_image', dicom_format, desc=(
            "A dataset that contains correct the header information for the "
            "acquired image. Used to copy geometry over preprocessed "
            "channels"), optional=True),
        # --- Derived filesets ------------------------------------------
        FilesetSpec('mag_preproc', nifti_gz_format, 'prepare_pipeline',
                    desc=("Magnitude after basic preprocessing, such as "
                          "realigning image axis to a standard rotation")),
        FilesetSpec('mag_channels', multi_nifti_gz_format,
                    'preprocess_channels_pipeline'),
        FilesetSpec('phase_channels', multi_nifti_gz_format,
                    'preprocess_channels_pipeline'),
        FilesetSpec('brain', nifti_gz_format, 'brain_extraction_pipeline',
                    desc="The brain masked image"),
        FilesetSpec('brain_mask', nifti_gz_format, 'brain_extraction_pipeline',
                    desc="Mask of the brain"),
        FilesetSpec('mag_coreg', nifti_gz_format, 'coreg_pipeline',
                    desc="Head image coregistered to 'coreg_ref'"),
        FilesetSpec('brain_coreg', nifti_gz_format,
                    'brain_coreg_pipeline',
                    desc=("Either brain-extracted image coregistered to "
                          "'coreg_ref_brain' or a brain extraction of a "
                          "coregistered (incl. skull) image")),
        FilesetSpec('brain_mask_coreg', nifti_gz_format,
                    'brain_coreg_pipeline',
                    desc=("Either brain-extracted image coregistered to "
                          "'coreg_ref_brain' or a brain extraction of a "
                          "coregistered (incl. skull) image")),
        FilesetSpec('coreg_ants_mat', text_matrix_format,
                    'coreg_ants_mat_pipeline'),
        FilesetSpec('coreg_fsl_mat', text_matrix_format,
                    'coreg_fsl_mat_pipeline'),
        FilesetSpec('mag_coreg_to_tmpl', nifti_gz_format,
                    'coreg_to_tmpl_pipeline'),
        FilesetSpec('coreg_to_tmpl_fsl_coeff', nifti_gz_format,
                    'coreg_to_tmpl_pipeline'),
        FilesetSpec('coreg_to_tmpl_fsl_report', gif_format,
                    'coreg_to_tmpl_pipeline'),
        FilesetSpec('coreg_to_tmpl_ants_mat', text_matrix_format,
                    'coreg_to_tmpl_pipeline'),
        FilesetSpec('coreg_to_tmpl_ants_warp', nifti_gz_format,
                    'coreg_to_tmpl_pipeline'),
        FilesetSpec('motion_mats', motion_mats_format, 'motion_mat_pipeline'),
        FilesetSpec('qformed', nifti_gz_format, 'qform_transform_pipeline'),
        FilesetSpec('qform_mat', text_matrix_format,
                    'qform_transform_pipeline'),
        # --- Fields extracted from the image header --------------------
        FieldSpec('tr', float, 'header_extraction_pipeline'),
        FieldSpec('echo_times', float, 'header_extraction_pipeline',
                  array=True),
        FieldSpec('voxel_sizes', float, 'header_extraction_pipeline',
                  array=True),
        FieldSpec('main_field_orient', float, 'header_extraction_pipeline',
                  array=True),
        FieldSpec('main_field_strength', float, 'header_extraction_pipeline'),
        FieldSpec('start_time', float, 'header_extraction_pipeline'),
        FieldSpec('real_duration', float, 'header_extraction_pipeline'),
        FieldSpec('total_duration', float, 'header_extraction_pipeline'),
        FieldSpec('ped', str, 'header_extraction_pipeline'),
        FieldSpec('pe_angle', float, 'header_extraction_pipeline'),
        # Templates
        InputFilesetSpec('template', STD_IMAGE_FORMATS, frequency='per_study',
                         default=FslReferenceData(
                             'MNI152_T1',
                             format=nifti_gz_format,
                             resolution='mni_template_resolution')),
        InputFilesetSpec('template_brain', STD_IMAGE_FORMATS,
                         frequency='per_study',
                         default=FslReferenceData(
                             'MNI152_T1',
                             format=nifti_gz_format,
                             resolution='mni_template_resolution',
                             dataset='brain')),
        InputFilesetSpec('template_mask', STD_IMAGE_FORMATS,
                         frequency='per_study',
                         default=FslReferenceData(
                             'MNI152_T1',
                             format=nifti_gz_format,
                             resolution='mni_template_resolution',
                             dataset='brain_mask'))]

    # Parameter/switch specifications controlling pipeline construction.
    add_param_specs = [
        SwitchSpec('resample_coreg_ref', False,
                   desc=("Whether to resample the coregistration reference "
                         "image to the resolution of the moving image")),
        SwitchSpec('reorient_to_std', True),
        ParamSpec('force_channel_flip', None, dtype=str, array=True,
                      desc=("Forcibly flip channel inputs during preprocess "
                            "channels to correct issues with channel recon. "
                            "The inputs are passed directly through to FSL's "
                            "swapdims (see fsl.SwapDimensions interface)")),
        SwitchSpec('bet_robust', True),
        ParamSpec('bet_f_threshold', 0.5),
        SwitchSpec('bet_reduce_bias', False,
                   desc="Only used if not 'bet_robust'"),
        ParamSpec('bet_g_threshold', 0.0),
        SwitchSpec('bet_method', 'fsl_bet', ('fsl_bet', 'optibet')),
        SwitchSpec('optibet_gen_report', False),
        SwitchSpec('coreg_to_tmpl_method', 'ants', ('fnirt', 'ants')),
        # NOTE(review): choices include 0.5 but dtype is int — confirm
        # whether the 0.5 mm template should be selectable
        ParamSpec('mni_template_resolution', None, choices=(0.5, 1, 2),
                  dtype=int),
        ParamSpec('fnirt_intensity_model', 'global_non_linear_with_bias'),
        ParamSpec('fnirt_subsampling', [4, 4, 2, 2, 1, 1]),
        ParamSpec('reoriented_dims', ('RL', 'AP', 'IS')),
        ParamSpec('resampled_resolution', None, dtype=list),
        SwitchSpec('coreg_method', 'ants', ('ants', 'flirt', 'spm'),
                   desc="The tool to use for linear registration"),
        ParamSpec('flirt_degrees_of_freedom', 6, desc=(
            "Number of degrees of freedom used in the registration. "
            "Default is 6 -> affine transformation.")),
        ParamSpec('flirt_cost_func', 'normmi', desc=(
            "Cost function used for the registration. Can be one of "
            "'mutualinfo', 'corratio', 'normcorr', 'normmi', 'leastsq',"
            " 'labeldiff', 'bbr'")),
        ParamSpec('flirt_qsform', False, desc=(
            "Whether to use the QS form supplied in the input image "
            "header (the image coordinates of the FOV supplied by the "
            "scanner")),
        ParamSpec(
            'channel_fname_regex',
            r'.*_(?P<channel>\d+)_(?P<echo>\d+)_(?P<axis>[A-Z]+)\.nii\.gz',
            desc=("The regular expression to extract channel, echo and complex"
                  " axis from the filenames of the coils channel images")),
        ParamSpec(
            'channel_real_label', 'REAL',
            desc=("The name of the real axis extracted from the channel "
                  "filename")),
        # Fixed copy-paste error: this desc previously said "real axis"
        ParamSpec(
            'channel_imag_label', 'IMAGINARY',
            desc=("The name of the imaginary axis extracted from the channel "
                  "filename"))]

    @property
    def mni_template_resolution(self):
        """Resolution of the MNI template images, taken from the
        'mni_template_resolution' parameter.

        Raises
        ------
        ArcanaMissingDataException
            If the parameter has not been set (automatic detection of the
            dataset resolution is not implemented).
        """
        # Look the parameter up once instead of twice as before
        res = self.parameter('mni_template_resolution')
        if res is None:
            raise ArcanaMissingDataException(
                "Automatic detection of dataset resolution is not implemented "
                "yet, please specify resolution of default MNI templates "
                "manually via 'mni_template_resolution' parameter")
        return res

    @property
    def is_coregistered(self):
        """Whether a coregistration reference (head or brain) was supplied."""
        if self.provided('coreg_ref'):
            return True
        return self.provided('coreg_ref_brain')

    @property
    def header_image_spec_name(self):
        """Name of the spec header information is sourced from:
        'header_image' when it was provided, otherwise 'magnitude'."""
        return ('header_image' if self.provided('header_image')
                else 'magnitude')

    @property
    def brain_spec_name(self):
        """Name of the brain-extracted image spec: 'brain_coreg' when a
        coregistration reference ('coreg_ref' or 'coreg_ref_brain') was
        supplied, otherwise plain 'brain'."""
        return 'brain_coreg' if self.is_coregistered else 'brain'

    @property
    def brain_mask_spec_name(self):
        """Name of the brain-mask spec, the coregistered variant when a
        coregistration reference was supplied."""
        return 'brain_mask_coreg' if self.is_coregistered else 'brain_mask'

    def preprocess_channels_pipeline(self, **name_maps):
        """Convert per-coil channel images from complex (real/imaginary)
        to polar (magnitude/phase) representation, optionally flipping
        dimensions, stomping header geometry and reorienting to standard
        orientation first.
        """
        pipeline = self.new_pipeline(
            'preprocess_channels',
            name_maps=name_maps,
            desc=("Convert channel signals in complex coords to polar coords "
                  "and combine"))

        # Corrections are only needed when a header image, reorientation or
        # a forced flip was requested; otherwise the raw directory is fed
        # straight into the polar conversion below
        if (self.provided('header_image') or
                self.branch('reorient_to_std') or
                self.parameter('force_channel_flip') is not None):
            # Read channel files reorient them into standard space and then
            # write back to directory
            list_channels = pipeline.add(
                'list_channels',
                ListDir(),
                inputs={
                    'directory': ('channels', multi_nifti_gz_format)})

            # Optionally flip every channel image (iterfield maps the node
            # over each file listed from the directory)
            if self.parameter('force_channel_flip') is not None:
                force_flip = pipeline.add(
                    'flip_dims',
                    fsl.SwapDimensions(
                        new_dims=tuple(self.parameter('force_channel_flip'))),
                    inputs={
                        'in_file': (list_channels, 'files')},
                    iterfield=['in_file'])
                geom_dest_file = (force_flip, 'out_file')
            else:
                geom_dest_file = (list_channels, 'files')

            if self.provided('header_image'):
                # If header image is provided stomp its geometry over the
                # acquired channels
                copy_geom = pipeline.add(
                    'qsm_copy_geometry',
                    fsl.CopyGeom(
                        output_type='NIFTI_GZ'),
                    inputs={
                        'in_file': ('header_image', nifti_gz_format),
                        'dest_file': geom_dest_file},
                    iterfield=(['dest_file']),
                    requirements=[fsl_req.v('5.0.8')])
                reorient_in_file = (copy_geom, 'out_file')
            else:
                reorient_in_file = geom_dest_file

            # Optionally reorient each channel to FSL standard orientation
            if self.branch('reorient_to_std'):
                reorient = pipeline.add(
                    'reorient_channel',
                    fsl.Reorient2Std(
                        output_type='NIFTI_GZ'),
                    inputs={
                        'in_file': reorient_in_file},
                    iterfield=['in_file'],
                    requirements=[fsl_req.v('5.0.8')])
                copy_to_dir_in_files = (reorient, 'out_file')
            else:
                copy_to_dir_in_files = reorient_in_file

            # Collect the (possibly transformed) channels back into a single
            # directory, keeping the original file names
            copy_to_dir = pipeline.add(
                'copy_to_dir',
                CopyToDir(),
                inputs={
                    'in_files': copy_to_dir_in_files,
                    'file_names': (list_channels, 'files')})
            to_polar_in_dir = (copy_to_dir, 'out_dir')
        else:
            to_polar_in_dir = ('channels', multi_nifti_gz_format)

        # Split each complex channel into magnitude and phase images
        pipeline.add(
            'to_polar',
            ToPolarCoords(
                in_fname_re=self.parameter('channel_fname_regex'),
                real_label=self.parameter('channel_real_label'),
                imaginary_label=self.parameter('channel_imag_label')),
            inputs={
                'in_dir': to_polar_in_dir},
            outputs={
                'mag_channels': ('magnitudes_dir', multi_nifti_gz_format),
                'phase_channels': ('phases_dir', multi_nifti_gz_format)})

        return pipeline

    def coreg_pipeline(self, **name_maps):
        """Construct the linear coregistration pipeline selected by the
        'coreg_method' switch ('flirt', 'ants' or 'spm').

        Raises
        ------
        ArcanaOutputNotProducedException
            If the (name-mapped) 'coreg_ref' input has not been provided
            to the study.
        """
        if self.branch('coreg_method', 'flirt'):
            pipeline = self._flirt_linear_coreg_pipeline(**name_maps)
        elif self.branch('coreg_method', 'ants'):
            pipeline = self._ants_linear_coreg_pipeline(**name_maps)
        elif self.branch('coreg_method', 'spm'):
            pipeline = self._spm_linear_coreg_pipeline(**name_maps)
        else:
            self.unhandled_branch('coreg_method')
        ref_spec = pipeline.map_input('coreg_ref')
        if not self.provided(ref_spec):
            # BUG FIX: the message previously had two '{}' placeholders but
            # only one format argument, so raising it crashed with an
            # IndexError instead of the intended exception
            raise ArcanaOutputNotProducedException(
                "Cannot co-register {} as reference image "
                "'{}' has not been provided".format(self, ref_spec))
        return pipeline

    def brain_extraction_pipeline(self, **name_maps):
        """Delegate brain extraction to the implementation selected by the
        'bet_method' switch ('fsl_bet' or 'optibet')."""
        if self.branch('bet_method', 'fsl_bet'):
            constructed = self._bet_brain_extraction_pipeline(**name_maps)
        elif self.branch('bet_method', 'optibet'):
            constructed = self._optiBET_brain_extraction_pipeline(**name_maps)
        else:
            self.unhandled_branch('bet_method')
        return constructed

    def brain_coreg_pipeline(self, **name_maps):
        """
        Coregistered + brain-extracted images can be derived in 2-ways. If an
        explicit brain-extracted reference is provided to
        'coreg_ref_brain' then that is used to coregister a brain extracted
        image against. Alternatively, if only a skull-included reference is
        provided then the registration is performed with skulls-included and
        then brain extraction is performed after

        Raises
        ------
        BananaUsageError
            If neither 'coreg_ref' nor 'coreg_ref_brain' is provided.
        """
        if self.provided('coreg_ref_brain'):
            # If a reference brain extracted image is provided we coregister
            # the brain extracted image to that
            pipeline = self.coreg_pipeline(
                name='brain_coreg',
                name_maps=dict(
                    input_map={
                        'mag_preproc': 'brain',
                        'coreg_ref': 'coreg_ref_brain'},
                    output_map={
                        'mag_coreg': 'brain_coreg'},
                    name_maps=name_maps))

            # Apply coregistration transform to brain mask
            if self.branch('coreg_method', 'flirt'):
                pipeline.add(
                    'mask_transform',
                    ApplyXFM(
                        output_type='NIFTI_GZ',
                        apply_xfm=True),
                    inputs={
                        'in_matrix_file': (pipeline.node('flirt'),
                                           'out_matrix_file'),
                        'in_file': ('brain_mask', nifti_gz_format),
                        'reference': ('coreg_ref_brain', nifti_gz_format)},
                    outputs={
                        'brain_mask_coreg': ('out_file', nifti_gz_format)},
                    requirements=[fsl_req.v('5.0.10')],
                    wall_time=10)

            elif self.branch('coreg_method', 'ants'):
                # Convert ANTs transform matrix to FSL format if we have used
                # Ants registration so we can apply the transform using
                # ApplyXFM
                # NOTE(review): unlike the 'flirt' branch above, no 'outputs'
                # mapping is given here, so 'brain_mask_coreg' does not appear
                # to be connected from this node — confirm this is intended
                pipeline.add(
                    'mask_transform',
                    ants.resampling.ApplyTransforms(
                        interpolation='Linear',
                        input_image_type=3,
                        invert_transform_flags=[True, True, False]),
                    inputs={
                        'input_image': ('brain_mask', nifti_gz_format),
                        'reference_image': ('coreg_ref_brain',
                                            nifti_gz_format),
                        'transforms': (pipeline.node('ants_reg'),
                                       'forward_transforms')},
                    requirements=[ants_req.v('1.9')], mem_gb=16,
                    wall_time=30)
            else:
                self.unhandled_branch('coreg_method')

        elif self.provided('coreg_ref'):
            # If coreg_ref is provided then we co-register the non-brain
            # extracted images and then brain extract the co-registered image
            pipeline = self.brain_extraction_pipeline(
                name='bet_coreg',
                input_map={'mag_preproc': 'mag_coreg'},
                output_map={'brain': 'brain_coreg',
                            'brain_mask': 'brain_mask_coreg'},
                name_maps=name_maps)
        else:
            raise BananaUsageError(
                "Either 'coreg_ref' or 'coreg_ref_brain' needs to be provided "
                "in order to derive brain_coreg or brain_mask_coreg")
        return pipeline

    def _coreg_mat_pipeline(self, **name_maps):
        """Return whichever coregistration pipeline (brain-extracted or
        whole-head) produces the transformation matrix, depending on which
        reference input was provided.

        Raises
        ------
        ArcanaOutputNotProducedException
            If neither 'coreg_ref' nor 'coreg_ref_brain' is provided.
        """
        if self.provided('coreg_ref_brain'):
            pipeline = self.brain_coreg_pipeline(**name_maps)
        elif self.provided('coreg_ref'):
            pipeline = self.coreg_pipeline(**name_maps)
        else:
            # Fixed a stray leading quote that garbled the error message
            raise ArcanaOutputNotProducedException(
                "Coregistration matrices can only be derived if 'coreg_ref' "
                "or 'coreg_ref_brain' is provided to {}".format(self))
        return pipeline

    def coreg_ants_mat_pipeline(self, **name_maps):
        """Derive the ANTs-format coregistration matrix.

        When 'coreg_method' is not 'ants' the coreg-mat pipeline is still
        constructed, with its usual image outputs suppressed so that only
        the ANTs transform is generated.
        """
        if self.branch('coreg_method', 'ants'):
            pipeline = self._coreg_mat_pipeline(**name_maps)
        else:
            # Run the coreg_mat pipeline only to generate the ANTs transform
            # and mapping the typical outputs to None so they don't override
            # the other settings
            # NOTE(review): keyword here is 'output_maps' whereas sibling
            # methods use 'output_map', and 'mag_preproc' is an input spec —
            # verify both against the name-mapping API
            pipeline = self._coreg_mat_pipeline(
                output_maps={
                    'mag_preproc': None,
                    'brain_coreg': None,
                    'brain_mask_coreg': None},
                name_maps=name_maps)
        return pipeline

    def coreg_fsl_mat_pipeline(self, **name_maps):
        """Derive the FSL-format coregistration matrix.

        If FLIRT performed the coregistration the matrix comes straight
        from the coreg-mat pipeline; if ANTs was used, the ANTs (ITK)
        transform is converted to an FSL matrix instead.

        Raises
        ------
        BananaUsageError
            If conversion is required but neither 'coreg_ref' nor
            'coreg_ref_brain' has been provided.
        """
        if self.branch('coreg_method', 'flirt'):
            pipeline = self._coreg_mat_pipeline(**name_maps)
        elif self.branch('coreg_method', 'ants'):
            # Convert ANTS transform to FSL transform
            pipeline = self.new_pipeline(
                name='convert_ants_to_fsl_coreg_mat',
                name_maps=name_maps)

            # Choose the source/reference pair matching the reference that
            # was supplied to the study
            if self.provided('coreg_ref'):
                source = 'mag_preproc'
                ref = 'coreg_ref'
            elif self.provided('coreg_ref_brain'):
                source = 'brain'
                ref = 'coreg_ref_brain'
            else:
                raise BananaUsageError(
                    "Either 'coreg_ref' or 'coreg_ref_brain' needs to be "
                    "provided in order to derive brain_coreg or brain_coreg_"
                    "mask")

            pipeline.add(
                'transform_conv',
                ANTs2FSLMatrixConversion(
                    ras2fsl=True),
                inputs={
                    'itk_file': ('coreg_ants_mat', text_matrix_format),
                    'source_file': (source, nifti_gz_format),
                    'reference_file': (ref, nifti_gz_format)},
                outputs={
                    'coreg_fsl_mat': ('fsl_matrix', text_matrix_format)},
                requirements=[c3d_req.v('1.0')])
        else:
            self.unhandled_branch('coreg_method')

        return pipeline

    def coreg_to_tmpl_pipeline(self, **name_maps):
        """Delegate template coregistration to the implementation selected
        by the 'coreg_to_tmpl_method' switch ('fnirt' or 'ants')."""
        if self.branch('coreg_to_tmpl_method', 'fnirt'):
            constructed = self._fnirt_to_tmpl_pipeline(**name_maps)
        elif self.branch('coreg_to_tmpl_method', 'ants'):
            constructed = self._ants_to_tmpl_pipeline(**name_maps)
        else:
            self.unhandled_branch('coreg_to_tmpl_method')
        return constructed

    def _flirt_linear_coreg_pipeline(self, **name_maps):
        """
        Registers a MR scan to a reference MR scan using FSL's FLIRT command
        """

        pipeline = self.new_pipeline(
            name='linear_coreg',
            name_maps=name_maps,
            desc="Registers a MR scan against a reference image using FLIRT",
            citations=[fsl_cite])

        # The same study parameter drives both of FLIRT's cost-related
        # inputs ('cost' and 'cost_func')
        pipeline.add(
            'flirt',
            FLIRT(dof=self.parameter('flirt_degrees_of_freedom'),
                  cost=self.parameter('flirt_cost_func'),
                  cost_func=self.parameter('flirt_cost_func'),
                  output_type='NIFTI_GZ'),
            inputs={
                'in_file': ('mag_preproc', nifti_gz_format),
                'reference': ('coreg_ref', nifti_gz_format)},
            outputs={
                'mag_coreg': ('out_file', nifti_gz_format),
                'coreg_fsl_mat': ('out_matrix_file', text_matrix_format)},
            requirements=[fsl_req.v('5.0.8')],
            wall_time=5)

        return pipeline

    def _ants_linear_coreg_pipeline(self, **name_maps):
        """
        Registers a MR scan to a reference MR scan using ANTS's linear_reg
        command
        """

        pipeline = self.new_pipeline(
            name='linear_coreg',
            name_maps=name_maps,
            desc="Registers a MR scan against a reference image using ANTs",
            citations=[ants_cite])

        # Rigid ('r') registration wrapper; produces both the registered
        # image and the transform matrix
        pipeline.add(
            'ANTs_linear_Reg',
            AntsRegSyn(
                num_dimensions=3,
                transformation='r'),
            inputs={
                'ref_file': ('coreg_ref', nifti_gz_format),
                'input_file': ('mag_preproc', nifti_gz_format)},
            outputs={
                'mag_coreg': ('reg_file', nifti_gz_format),
                'coreg_ants_mat': ('regmat', text_matrix_format)},
            wall_time=10,
            requirements=[ants_req.v('2.0')])

        # NOTE: alternative implementation using nipype's ants.Registration
        # directly, kept below for reference
#         ants_reg = pipeline.add(
#             'ants_reg',
#             ants.Registration(
#                 dimension=3,
#                 collapse_output_transforms=True,
#                 float=False,
#                 interpolation='Linear',
#                 use_histogram_matching=False,
#                 winsorize_upper_quantile=0.995,
#                 winsorize_lower_quantile=0.005,
#                 verbose=True,
#                 transforms=['Rigid'],
#                 transform_parameters=[(0.1,)],
#                 metric=['MI'],
#                 metric_weight=[1],
#                 radius_or_number_of_bins=[32],
#                 sampling_strategy=['Regular'],
#                 sampling_percentage=[0.25],
#                 number_of_iterations=[[1000, 500, 250, 100]],
#                 convergence_threshold=[1e-6],
#                 convergence_window_size=[10],
#                 shrink_factors=[[8, 4, 2, 1]],
#                 smoothing_sigmas=[[3, 2, 1, 0]],
#                 output_warped_image=True),
#             inputs={
#                 'fixed_image': ('coreg_ref', nifti_gz_format),
#                 'moving_image': ('mag_preproc', nifti_gz_format)},
#             outputs={
#                 'mag_coreg': ('warped_image', nifti_gz_format)},
#             wall_time=10,
#             requirements=[ants_req.v('2.0')])
#
#         pipeline.add(
#             'select',
#             SelectOne(
#                 index=0),
#             inputs={
#                 'inlist': (ants_reg, 'forward_transforms')},
#             outputs={
#                 'coreg_ants_mat': ('out', text_matrix_format)})

        return pipeline

    def _spm_linear_coreg_pipeline(self, **name_maps):  # @UnusedVariable
        """
        Coregisters T2 image to T1 image using SPM's "Register" method.

        NB: Default values come from the W2MHS toolbox
        """
        pipeline = self.new_pipeline(
            'linear_coreg',
            name_maps=name_maps,
            desc="Coregister T2-weighted images to T1",
            citations=[spm_cite])

        # Estimate-and-write coregistration; the numeric settings
        # (separation, tolerance, fwhm, interpolation) are the W2MHS
        # defaults mentioned in the docstring
        pipeline.add(
            'mag_coreg',
            Coregister(
                jobtype='estwrite',
                cost_function='nmi',
                separation=[4, 2],
                tolerance=[0.02, 0.02, 0.02, 0.001, 0.001, 0.001, 0.01, 0.01,
                           0.01, 0.001, 0.001, 0.001],
                fwhm=[7, 7],
                write_interp=4,
                write_wrap=[0, 0, 0],
                write_mask=False,
                out_prefix='r'),
            inputs={
                'target': ('coreg_ref', nifti_format),
                'source': ('mag_preproc', nifti_format)},
            outputs={
                'mag_coreg': ('coregistered_source', nifti_format)},
            requirements=[spm_req.v(12)],
            wall_time=30)
        return pipeline

    def qform_transform_pipeline(self, **name_maps):
        """Transform the image into the space of the coregistration
        reference using only the qform headers (FLIRT with uses_qform and
        apply_xfm set).

        Raises
        ------
        BananaUsageError
            If neither 'coreg_ref' nor 'coreg_ref_brain' is provided.
        """
        pipeline = self.new_pipeline(
            name='qform_transform',
            name_maps=name_maps,
            desc="Registers a MR scan against a reference image",
            citations=[fsl_cite])

        # Pick the moving/reference spec pair based on which reference
        # input was supplied to the study
        if self.provided('coreg_ref'):
            moving, fixed = 'mag_preproc', 'coreg_ref'
        elif self.provided('coreg_ref_brain'):
            moving, fixed = 'brain', 'coreg_ref_brain'
        else:
            raise BananaUsageError(
                "'coreg_ref' or 'coreg_ref_brain' need to be provided to "
                "study in order to run qform_transform")

        pipeline.add(
            'flirt',
            FLIRT(
                uses_qform=True,
                apply_xfm=True,
                output_type='NIFTI_GZ'),
            inputs={
                'in_file': (moving, nifti_gz_format),
                'reference': (fixed, nifti_gz_format)},
            outputs={
                'qformed': ('out_file', nifti_gz_format),
                'qform_mat': ('out_matrix_file', text_matrix_format)},
            requirements=[fsl_req.v('5.0.8')],
            wall_time=5)

        return pipeline

    def _bet_brain_extraction_pipeline(self, **name_maps):
        """
        Generates a whole brain mask using FSL's BET command.
        """
        pipeline = self.new_pipeline(
            name='brain_extraction',
            name_maps=name_maps,
            desc="Generate brain mask from mr_scan",
            citations=[fsl_cite, bet_cite, bet2_cite])

        # BET node; fractional-intensity and vertical-gradient thresholds
        # are study parameters
        bet_node = pipeline.add(
            "bet",
            fsl.BET(
                mask=True,
                output_type='NIFTI_GZ',
                frac=self.parameter('bet_f_threshold'),
                vertical_gradient=self.parameter('bet_g_threshold')),
            inputs={
                'in_file': ('mag_preproc', nifti_gz_format)},
            outputs={
                'brain': ('out_file', nifti_gz_format),
                'brain_mask': ('mask_file', nifti_gz_format)},
            requirements=[fsl_req.v('5.0.9')])

        # 'robust' and 'reduce_bias' are alternative BET modes, so only one
        # of the two is configured on the node
        if self.branch('bet_robust'):
            bet_node.inputs.robust = True
        else:
            bet_node.inputs.reduce_bias = self.parameter('bet_reduce_bias')

        return pipeline

    def _optiBET_brain_extraction_pipeline(self, **name_maps):
        """
        Generates a whole brain mask using a modified optiBET approach.

        Registers the preprocessed magnitude image to the template with
        ANTs SyN, warps the template mask back into subject space using the
        inverse transform, binarises it and applies it to the magnitude
        image. Optionally renders a QC report of the extraction.
        """
        pipeline = self.new_pipeline(
            name='brain_extraction',
            name_maps=name_maps,
            desc=("Modified implementation of optiBET.sh"),
            citations=[fsl_cite])

        # Nonlinear (SyN) registration of the subject image to the template
        mni_reg = pipeline.add(
            'T1_reg',
            AntsRegSyn(
                num_dimensions=3,
                transformation='s',
                out_prefix='T12MNI',
                num_threads=4),
            inputs={
                'ref_file': ('template', nifti_gz_format),
                'input_file': ('mag_preproc', nifti_gz_format)},
            wall_time=25,
            requirements=[ants_req.v('2.0')])

        # Stack the inverse warp and the affine matrix into a single
        # transform list for applying template -> subject space
        merge_trans = pipeline.add(
            'merge_transforms',
            Merge(2),
            inputs={
                'in1': (mni_reg, 'inv_warp'),
                'in2': (mni_reg, 'regmat')},
            wall_time=1)

        # Inversion flags matching the transform list above: the warp is
        # already the inverse (False), the affine must be inverted (True)
        trans_flags = pipeline.add(
            'trans_flags',
            Merge(2,
                  in1=False,
                  in2=True),
            wall_time=1)

        # Resample the template mask into subject space (nearest-neighbour
        # to keep the mask binary-valued)
        apply_trans = pipeline.add(
            'ApplyTransform',
            ApplyTransforms(
                interpolation='NearestNeighbor',
                input_image_type=3),
            inputs={
                'input_image': ('template_mask', nifti_gz_format),
                'reference_image': ('mag_preproc', nifti_gz_format),
                'transforms': (merge_trans, 'out'),
                'invert_transform_flags': (trans_flags, 'out')},
            wall_time=7,
            mem_gb=24,
            requirements=[ants_req.v('2.0')])

        # Binarise the warped mask
        maths1 = pipeline.add(
            'binarize',
            fsl.ImageMaths(
                suffix='_optiBET_brain_mask',
                op_string='-bin',
                output_type='NIFTI_GZ'),
            inputs={
                'in_file': (apply_trans, 'output_image')},
            outputs={
                'brain_mask': ('out_file', nifti_gz_format)},
            wall_time=5,
            requirements=[fsl_req.v('5.0.8')])

        # Apply the binarised mask to the magnitude image
        maths2 = pipeline.add(
            'mask',
            fsl.ImageMaths(
                suffix='_optiBET_brain',
                op_string='-mas',
                output_type='NIFTI_GZ'),
            inputs={
                'in_file': ('mag_preproc', nifti_gz_format),
                'in_file2': (maths1, 'out_file')},
            outputs={
                'brain': ('out_file', nifti_gz_format)},
            wall_time=5,
            requirements=[fsl_req.v('5.0.8')])

        # Optional QC report overlaying the extracted brain on the input
        if self.branch('optibet_gen_report'):
            pipeline.add(
                'slices',
                FSLSlices(
                    outname='optiBET_report',
                    output_type='NIFTI_GZ'),
                wall_time=5,
                inputs={
                    'im1': ('mag_preproc', nifti_gz_format),
                    'im2': (maths2, 'out_file')},
                outputs={
                    'optiBET_report': ('report', gif_format)},
                requirements=[fsl_req.v('5.0.8')])

        return pipeline

    # @UnusedVariable @IgnorePep8
    def _fnirt_to_tmpl_pipeline(self, **name_maps):
        """
        Registers a MR scan to a reference MR scan using FSL's nonlinear
        FNIRT command.

        Runs Reorient2Std on the image, its mask and the brain, performs an
        affine initialisation with FLIRT, then the nonlinear warp with
        FNIRT using a fixed multi-resolution schedule.

        Parameters
        ----------
        template : Which template to use, can be one of 'mni_nl6'
        """
        pipeline = self.new_pipeline(
            name='mag_coreg_to_tmpl',
            name_maps=name_maps,
            desc=("Nonlinearly registers a MR scan to a standard space,"
                  "e.g. MNI-space"),
            citations=[fsl_cite])

        # Basic reorientation to standard MNI space
        reorient = pipeline.add(
            'reorient',
            Reorient2Std(
                output_type='NIFTI_GZ'),
            inputs={
                'in_file': ('mag_preproc', nifti_gz_format)},
            requirements=[fsl_req.v('5.0.8')])

        # Reorient the brain mask to match
        reorient_mask = pipeline.add(
            'reorient_mask',
            Reorient2Std(
                output_type='NIFTI_GZ'),
            inputs={
                'in_file': ('brain_mask', nifti_gz_format)},
            requirements=[fsl_req.v('5.0.8')])

        # Reorient the brain-extracted image to match
        reorient_brain = pipeline.add(
            'reorient_brain',
            Reorient2Std(
                output_type='NIFTI_GZ'),
            inputs={
                'in_file': ('brain', nifti_gz_format)},
            requirements=[fsl_req.v('5.0.8')])

        # Affine transformation to MNI space (12 dof), used to initialise
        # the nonlinear FNIRT registration below
        flirt = pipeline.add(
            'flirt',
            interface=FLIRT(
                dof=12,
                output_type='NIFTI_GZ'),
            inputs={
                'reference': ('template_brain', nifti_gz_format),
                'in_file': (reorient_brain, 'out_file')},
            requirements=[fsl_req.v('5.0.8')],
            wall_time=5)

        # Apply mask if corresponding subsampling scheme is 1
        # (i.e. 1-to-1 resolution) otherwise don't.
        apply_mask = [int(s == 1)
                      for s in self.parameter('fnirt_subsampling')]
        # Nonlinear transformation to MNI space. All the list-valued
        # parameters are per-level settings of the multi-resolution
        # schedule and must be the same length as 'fnirt_subsampling'.
        pipeline.add(
            'fnirt',
            interface=FNIRT(
                output_type='NIFTI_GZ',
                intensity_mapping_model=(
                    self.parameter('fnirt_intensity_model')
                    if self.parameter('fnirt_intensity_model') is not None else
                    'none'),
                subsampling_scheme=self.parameter('fnirt_subsampling'),
                fieldcoeff_file=True,
                in_fwhm=[8, 6, 5, 4, 3, 2],  # [8, 6, 5, 4.5, 3, 2] This threw an error because of float value @IgnorePep8,
                ref_fwhm=[8, 6, 5, 4, 2, 0],
                regularization_lambda=[300, 150, 100, 50, 40, 30],
                apply_intensity_mapping=[1, 1, 1, 1, 1, 0],
                max_nonlin_iter=[5, 5, 5, 5, 5, 10],
                apply_inmask=apply_mask,
                apply_refmask=apply_mask),
            inputs={
                'ref_file': ('template', nifti_gz_format),
                'refmask': ('template_mask', nifti_gz_format),
                'in_file': (reorient, 'out_file'),
                'inmask_file': (reorient_mask, 'out_file'),
                'affine_file': (flirt, 'out_matrix_file')},
            outputs={
                'mag_coreg_to_tmpl': ('warped_file', nifti_gz_format),
                'coreg_to_tmpl_fsl_coeff': ('fieldcoeff_file',
                                             nifti_gz_format)},
            requirements=[fsl_req.v('5.0.8')],
            wall_time=60)
        # Set registration parameters
        # TODO: Need to work out which parameters to use
        return pipeline

    def _ants_to_tmpl_pipeline(self, **name_maps):
        """
        Nonlinearly registers the brain-extracted image to the template
        brain using ANTs SyN (via the AntsRegSyn wrapper), producing the
        warped image plus the affine matrix and warp field.

        Removed a large block (~67 lines) of commented-out dead code that
        duplicated this registration with raw nipype ants.Registration.
        """
        pipeline = self.new_pipeline(
            name='mag_coreg_to_tmpl',
            name_maps=name_maps,
            desc=("Nonlinearly registers a MR scan to a standard space,"
                  "e.g. MNI-space"),
            # NOTE(review): citations lists fsl_cite although the
            # registration is performed with ANTs -- confirm intended
            citations=[fsl_cite])

        pipeline.add(
            'Struct2MNI_reg',
            AntsRegSyn(
                num_dimensions=3,
                transformation='s',  # rigid + affine + deformable SyN
                num_threads=4),
            inputs={
                # brain_spec_name is 'coreg_brain' when a coreg reference
                # was provided to the study, otherwise 'brain'
                'input_file': (self.brain_spec_name, nifti_gz_format),
                'ref_file': ('template_brain', nifti_gz_format)},
            outputs={
                'mag_coreg_to_tmpl': ('reg_file', nifti_gz_format),
                'coreg_to_tmpl_ants_mat': ('regmat', text_matrix_format),
                'coreg_to_tmpl_ants_warp': ('warp_file', nifti_gz_format)},
            wall_time=25,
            requirements=[ants_req.v('2.0')])

        return pipeline

    def prepare_pipeline(self, **name_maps):
        """
        Performs basic preprocessing, such as swapping dimensions into
        standard orientation and resampling (if required)

        Parameters
        -------
        new_dims : tuple(str)[3]
            A 3-tuple with the new orientation of the image (see FSL
            swap dim)
        resolution : list(float)[3] | None
            New resolution of the image. If None no resampling is
            performed
        """
        pipeline = self.new_pipeline(
            name='prepare_pipeline',
            name_maps=name_maps,
            desc=("Dimensions swapping to ensure that all the images "
                  "have the same orientations."),
            citations=[fsl_cite])

        resampled_resolution = self.parameter('resampled_resolution')
        if self.branch('reorient_to_std') or resampled_resolution is not None:
            if self.branch('reorient_to_std'):
                swap = pipeline.add(
                    'fslreorient2std',
                    fsl.utils.Reorient2Std(
                        output_type='NIFTI_GZ'),
                    inputs={
                        'in_file': ('magnitude', nifti_gz_format)},
                    requirements=[fsl_req.v('5.0.9')])
                # Downstream nodes read from the reoriented image
                upstream = (swap, 'out_file')
            else:
                # BUGFIX: previously 'swap' was referenced unconditionally
                # below, raising a NameError when only resampling was
                # requested (reorient_to_std False). Resample directly from
                # the acquired magnitude image in that case.
                upstream = ('magnitude', nifti_gz_format)

            if resampled_resolution is not None:
                resample = pipeline.add(
                    "resample",
                    MRResize(
                        voxel=resampled_resolution),
                    inputs={
                        'in_file': upstream},
                    requirements=[mrtrix_req.v('3.0rc3')])
                pipeline.connect_output('mag_preproc', resample, 'out_file',
                                        nifti_gz_format)
            else:
                # reorient_to_std must be True on this path, so 'swap' exists
                pipeline.connect_output('mag_preproc', swap, 'out_file',
                                        nifti_gz_format)
        else:
            # Don't actually do any processing just copy magnitude image to
            # preproc
            pipeline.add(
                'identity',
                IdentityInterface(
                    ['file']),
                inputs={
                    'file': ('magnitude', nifti_gz_format)},
                outputs={
                    'mag_preproc': ('file', nifti_gz_format)})

        return pipeline

    def header_extraction_pipeline(self, **name_maps):
        """
        Extracts scan metadata (timing, phase-encoding, field strength and
        orientation, etc.) from the header of the designated header image,
        which must be in DICOM or extended-NIfTI format.
        """
        pipeline = self.new_pipeline(
            name='header_extraction',
            name_maps=name_maps,
            desc=("Pipeline to extract the most important scan "
                  "information from the image header"),
            citations=[])

        input_format = self.input(self.header_image_spec_name).format

        # Both extractor interfaces expose identically named outputs, so
        # the output mapping is shared between the two branches
        header_outputs = {
            'tr': ('tr', float),
            'start_time': ('start_time', str),
            'total_duration': ('total_duration', str),
            'real_duration': ('real_duration', str),
            'ped': ('ped', str),
            'pe_angle': ('pe_angle', str),
            'echo_times': ('echo_times', float),
            'voxel_sizes': ('voxel_sizes', float),
            'main_field_strength': ('B0', float),
            'main_field_orient': ('H', float)}

        if input_format == dicom_format:
            pipeline.add(
                'hd_info_extraction',
                DicomHeaderInfoExtraction(
                    multivol=False),
                inputs={
                    'dicom_folder': (self.header_image_spec_name, dicom_format)},
                outputs=header_outputs)
        elif input_format == nifti_gz_x_format:
            pipeline.add(
                'hd_info_extraction',
                NiftixHeaderInfoExtraction(),
                inputs={
                    'in_file': (self.header_image_spec_name, nifti_gz_x_format)},
                outputs=header_outputs)
        else:
            raise BananaUsageError(
                "Can only extract header info if 'magnitude' fileset "
                "is provided in DICOM or extended NIfTI format (provided {})"
                .format(self.input(self.header_image_spec_name).format))

        return pipeline

    def motion_mat_pipeline(self, **name_maps):
        """
        Calculates motion matrices from the coregistration and qform
        matrices. If the coregistration matrix cannot be derived (no
        coregistration reference), the study is treated as the motion
        reference instead.
        """
        pipeline = self.new_pipeline(
            name='motion_mat_calculation',
            name_maps=name_maps,
            desc=("Motion matrices calculation"),
            citations=[fsl_cite])

        mm = pipeline.add(
            'motion_mats',
            MotionMatCalculation(),
            outputs={
                'motion_mats': ('motion_mats', motion_mats_format)})
        if not self.spec('coreg_fsl_mat').derivable:
            # BUGFIX: log message previously referred to 'coreg_matrix',
            # which is not the spec actually checked above ('coreg_fsl_mat')
            logger.info("Cannot derive 'coreg_fsl_mat' for {} required for "
                        "motion matrix calculation, assuming that it "
                        "is the reference study".format(self))
            mm.inputs.reference = True
            # Dummy connection so the node still depends on an input
            pipeline.connect_input('magnitude', mm, 'dummy_input')
        else:
            pipeline.connect_input('coreg_fsl_mat', mm, 'reg_mat',
                                   text_matrix_format)
            pipeline.connect_input('qform_mat', mm, 'qform_mat',
                                   text_matrix_format)
            if 'align_mats' in self.data_spec_names():
                pipeline.connect_input('align_mats', mm, 'align_mats',
                                       motion_mats_format)
        return pipeline
Example #10
0
File: base.py Project: amrka/banana
class MriStudy(Study, metaclass=StudyMetaClass):

    # Fileset/field specifications for this study class. Fixes: reworded
    # the garbled 'header_image' description ("contains correct the") and
    # completed the truncated 'dcm_info' description ("Extracts ").
    add_data_specs = [
        AcquiredFilesetSpec('magnitude',
                            STD_IMAGE_FORMATS,
                            desc=("Typically the primary scan acquired from "
                                  "the scanner for the given contrast")),
        AcquiredFilesetSpec(
            'coreg_ref',
            STD_IMAGE_FORMATS,
            desc=("A reference scan to coregister the primary scan to. Should "
                  "not be brain extracted"),
            optional=True),
        AcquiredFilesetSpec(
            'coreg_ref_brain',
            STD_IMAGE_FORMATS,
            desc=("A brain-extracted reference scan to coregister a brain-"
                  "extracted scan to. Note that the output of the "
                  "registration coreg_brain can also be derived by brain "
                  "extracting the output of coregistration performed "
                  "before brain extraction if 'coreg_ref' is provided"),
            optional=True),
        AcquiredFilesetSpec('channels', (multi_nifti_gz_format, zip_format),
                            optional=True,
                            desc=("Reconstructed complex image for each "
                                  "coil without standardisation.")),
        AcquiredFilesetSpec(
            'header_image',
            dicom_format,
            desc=("A dataset that contains the correct header information "
                  "for the acquired image. Used to copy geometry over "
                  "preprocessed channels"),
            optional=True),
        FilesetSpec('channel_mags', multi_nifti_gz_format,
                    'preprocess_channels'),
        FilesetSpec('channel_phases', multi_nifti_gz_format,
                    'preprocess_channels'),
        FilesetSpec('preproc',
                    nifti_gz_format,
                    'preprocess_pipeline',
                    desc=("Performs basic preprocessing, such as realigning "
                          "image axis to a standard rotation")),
        FilesetSpec('brain',
                    nifti_gz_format,
                    'brain_extraction_pipeline',
                    desc="The brain masked image"),
        FilesetSpec('brain_mask',
                    nifti_gz_format,
                    'brain_extraction_pipeline',
                    desc="Mask of the brain"),
        FilesetSpec('coreg',
                    nifti_gz_format,
                    'linear_coreg_pipeline',
                    desc="Head image coregistered to 'coreg_ref'"),
        FilesetSpec('coreg_brain',
                    nifti_gz_format,
                    'coreg_brain_pipeline',
                    desc=("Either brain-extracted image coregistered to "
                          "'coreg_ref_brain' or a brain extraction of a "
                          "coregistered (incl. skull) image")),
        FilesetSpec('coreg_matrix', text_matrix_format,
                    'coreg_matrix_pipeline'),
        FilesetSpec('coreg_brain_matrix', text_matrix_format,
                    'coreg_brain_pipeline'),
        FilesetSpec('coreg_to_atlas', nifti_gz_format,
                    'coregister_to_atlas_pipeline'),
        FilesetSpec('coreg_to_atlas_coeff', nifti_gz_format,
                    'coregister_to_atlas_pipeline'),
        FilesetSpec('coreg_to_atlas_mat', text_matrix_format,
                    'coregister_to_atlas_pipeline'),
        FilesetSpec('coreg_to_atlas_warp', nifti_gz_format,
                    'coregister_to_atlas_pipeline'),
        FilesetSpec('coreg_to_atlas_report', gif_format,
                    'coregister_to_atlas_pipeline'),
        FilesetSpec('wm_seg', nifti_gz_format, 'segmentation_pipeline'),
        FilesetSpec('dcm_info',
                    text_format,
                    'header_extraction_pipeline',
                    desc=("Scan information extracted from the header of "
                          "the acquired image")),
        FilesetSpec('motion_mats', motion_mats_format, 'motion_mat_pipeline'),
        FilesetSpec('qformed', nifti_gz_format, 'qform_transform_pipeline'),
        FilesetSpec('qform_mat', text_matrix_format,
                    'qform_transform_pipeline'),
        FieldSpec('tr', float, 'header_extraction_pipeline'),
        FieldSpec('echo_times',
                  float,
                  'header_extraction_pipeline',
                  array=True),
        FieldSpec('voxel_sizes',
                  float,
                  'header_extraction_pipeline',
                  array=True),
        FieldSpec('main_field_orient',
                  float,
                  'header_extraction_pipeline',
                  array=True),
        FieldSpec('main_field_strength', float, 'header_extraction_pipeline'),
        FieldSpec('start_time', float, 'header_extraction_pipeline'),
        FieldSpec('real_duration', float, 'header_extraction_pipeline'),
        FieldSpec('total_duration', float, 'header_extraction_pipeline'),
        FieldSpec('ped', str, 'header_extraction_pipeline'),
        FieldSpec('pe_angle', float, 'header_extraction_pipeline'),
        # Templates
        AcquiredFilesetSpec('atlas',
                            STD_IMAGE_FORMATS,
                            frequency='per_study',
                            default=FslAtlas('MNI152_T1',
                                             resolution='atlas_resolution')),
        AcquiredFilesetSpec('atlas_brain',
                            STD_IMAGE_FORMATS,
                            frequency='per_study',
                            default=FslAtlas('MNI152_T1',
                                             resolution='atlas_resolution',
                                             dataset='brain')),
        AcquiredFilesetSpec('atlas_mask',
                            STD_IMAGE_FORMATS,
                            frequency='per_study',
                            default=FslAtlas('MNI152_T1',
                                             resolution='atlas_resolution',
                                             dataset='brain_mask'))
    ]

    # Parameter/switch specifications for this study class. Fix: the
    # 'channel_imag_label' description wrongly said "real axis" (copy-paste
    # from 'channel_real_label'); corrected to "imaginary axis".
    add_param_specs = [
        SwitchSpec('reorient_to_std', True),
        ParameterSpec('force_channel_flip',
                      None,
                      dtype=str,
                      array=True,
                      desc=("Forcibly flip channel inputs during preprocess "
                            "channels to correct issues with channel recon. "
                            "The inputs are passed directly through to FSL's "
                            "swapdims (see fsl.SwapDimensions interface)")),
        SwitchSpec('bet_robust', True),
        ParameterSpec('bet_f_threshold', 0.5),
        SwitchSpec('bet_reduce_bias',
                   False,
                   desc="Only used if not 'bet_robust'"),
        ParameterSpec('bet_g_threshold', 0.0),
        SwitchSpec('bet_method', 'fsl_bet', ('fsl_bet', 'optibet')),
        SwitchSpec('optibet_gen_report', False),
        SwitchSpec('atlas_coreg_tool', 'ants', ('fnirt', 'ants')),
        ParameterSpec('atlas_resolution', 2),  # choices=(0.5, 1, 2)),
        ParameterSpec('fnirt_intensity_model', 'global_non_linear_with_bias'),
        ParameterSpec('fnirt_subsampling', [4, 4, 2, 2, 1, 1]),
        ParameterSpec('preproc_new_dims', ('RL', 'AP', 'IS')),
        ParameterSpec('preproc_resolution', None, dtype=list),
        SwitchSpec('linear_coreg_method',
                   'flirt', ('flirt', 'spm', 'ants'),
                   desc="The tool to use for linear registration"),
        ParameterSpec(
            'flirt_degrees_of_freedom',
            6,
            desc=("Number of degrees of freedom used in the registration. "
                  "Default is 6 -> affine transformation.")),
        ParameterSpec(
            'flirt_cost_func',
            'normmi',
            desc=("Cost function used for the registration. Can be one of "
                  "'mutualinfo', 'corratio', 'normcorr', 'normmi', 'leastsq',"
                  " 'labeldiff', 'bbr'")),
        ParameterSpec(
            'flirt_qsform',
            False,
            desc=("Whether to use the QS form supplied in the input image "
                  "header (the image coordinates of the FOV supplied by the "
                  "scanner")),
        ParameterSpec(
            'channel_fname_regex',
            r'.*_(?P<channel>\d+)_(?P<echo>\d+)_(?P<axis>[A-Z]+)\.nii\.gz',
            desc=("The regular expression to extract channel, echo and complex"
                  " axis from the filenames of the coils channel images")),
        ParameterSpec(
            'channel_real_label',
            'REAL',
            desc=("The name of the real axis extracted from the channel "
                  "filename")),
        ParameterSpec(
            'channel_imag_label',
            'IMAGINARY',
            desc=("The name of the imaginary axis extracted from the channel "
                  "filename"))
    ]

    def preprocess_channels(self, **name_maps):
        """
        Converts per-coil channel images from complex (real/imaginary) to
        polar (magnitude/phase) coordinates, optionally flipping axes,
        stomping header geometry and reorienting to standard space first.
        """
        pipeline = self.new_pipeline(
            'preprocess_channels',
            name_maps=name_maps,
            desc=("Convert channel signals in complex coords to polar coords "
                  "and combine"))

        # Only unpack/rewrite the channel directory if some per-file
        # correction (geometry stomp, reorientation or axis flip) is needed;
        # otherwise the directory is fed straight into the polar conversion
        if (self.provided('header_image') or self.branch('reorient_to_std')
                or self.parameter('force_channel_flip') is not None):
            # Read channel files reorient them into standard space and then
            # write back to directory
            list_channels = pipeline.add(
                'list_channels',
                ListDir(),
                inputs={'directory': ('channels', multi_nifti_gz_format)})

            # Optionally flip axes of every channel image (iterfield maps
            # the node over the listed files)
            if self.parameter('force_channel_flip') is not None:
                force_flip = pipeline.add(
                    'flip_dims',
                    fsl.SwapDimensions(
                        new_dims=tuple(self.parameter('force_channel_flip'))),
                    connect={'in_file': (list_channels, 'files')},
                    iterfield=['in_file'])
                geom_dest_file = (force_flip, 'out_file')
            else:
                geom_dest_file = (list_channels, 'files')

            if self.provided('header_image'):
                # If header image is provided stomp its geometry over the
                # acquired channels
                copy_geom = pipeline.add(
                    'qsm_copy_geometry',
                    fsl.CopyGeom(),
                    inputs={'in_file': ('header_image', nifti_gz_format)},
                    connect={'dest_file': geom_dest_file},
                    iterfield=(['dest_file']),
                    requirements=[fsl_req.v('5.0.8')])
                reorient_in_file = (copy_geom, 'out_file')
            else:
                reorient_in_file = geom_dest_file

            # Optionally reorient every channel image to standard space
            if self.branch('reorient_to_std'):
                reorient = pipeline.add(
                    'reorient_channel',
                    fsl.Reorient2Std(output_type='NIFTI_GZ'),
                    connect={'in_file': reorient_in_file},
                    iterfield=['in_file'],
                    requirements=[fsl_req.v('5.0.8')])
                copy_to_dir_in_files = (reorient, 'out_file')
            else:
                copy_to_dir_in_files = reorient_in_file

            # Re-assemble the corrected files into a directory, preserving
            # the original filenames
            copy_to_dir = pipeline.add('copy_to_dir',
                                       CopyToDir(),
                                       connect={
                                           'in_files': copy_to_dir_in_files,
                                           'file_names':
                                           (list_channels, 'files')
                                       })
            to_polar_in = {'connect': {'in_dir': (copy_to_dir, 'out_dir')}}
        else:
            # No per-file corrections: pass the acquired directory through
            to_polar_in = {
                'inputs': {
                    'in_dir': ('channels', multi_nifti_gz_format)
                }
            }

        # Convert real/imaginary channel pairs to magnitude/phase images
        pipeline.add('to_polar',
                     ToPolarCoords(
                         in_fname_re=self.parameter('channel_fname_regex'),
                         real_label=self.parameter('channel_real_label'),
                         imaginary_label=self.parameter('channel_imag_label')),
                     outputs={
                         'magnitudes_dir':
                         ('channel_mags', multi_nifti_gz_format),
                         'phases_dir':
                         ('channel_phases', multi_nifti_gz_format)
                     },
                     **to_polar_in)

        return pipeline

    @property
    def brain_spec_name(self):
        """
        The name of the brain extracted image after registration has been
        applied if registration is specified by supplying 'coreg_ref' or
        'coreg_ref_brain' optional inputs.
        """
        has_coreg_ref = (self.provided('coreg_ref')
                         or self.provided('coreg_ref_brain'))
        return 'coreg_brain' if has_coreg_ref else 'brain'

    def linear_coreg_pipeline(self, **name_maps):
        """
        Dispatches to the linear registration implementation selected by
        the 'linear_coreg_method' switch ('flirt', 'spm' or 'ants').
        """
        if self.branch('linear_coreg_method', 'flirt'):
            # BUGFIX: previously called self._flirt_pipeline, which is not
            # defined; the FLIRT implementation is named
            # _flirt_linear_coreg_pipeline (consistent with the ants/spm
            # variants below)
            pipeline = self._flirt_linear_coreg_pipeline(**name_maps)
        elif self.branch('linear_coreg_method', 'ants'):
            pipeline = self._ants_linear_coreg_pipeline(**name_maps)
        elif self.branch('linear_coreg_method', 'spm'):
            pipeline = self._spm_linear_coreg_pipeline(**name_maps)
        else:
            self.unhandled_branch('linear_coreg_method')
        return pipeline

    def brain_extraction_pipeline(self, **name_maps):
        """
        Dispatches to the brain-extraction implementation selected by the
        'bet_method' switch ('fsl_bet' or 'optibet').
        """
        if self.branch('bet_method', 'fsl_bet'):
            return self._bet_brain_extraction_pipeline(**name_maps)
        elif self.branch('bet_method', 'optibet'):
            return self._optiBET_brain_extraction_pipeline(**name_maps)
        else:
            self.unhandled_branch('bet_method')

    def coreg_brain_pipeline(self, **name_maps):
        """
        Coregistered + brain-extracted images can be derived in 2-ways. If an
        explicit brain-extracted reference is provided to
        'coreg_ref_brain' then that is used to coregister a brain extracted
        image against. Alternatively, if only a skull-included reference is
        provided then the registration is performed with skulls-included and
        then brain extraction is performed after
        """
        if self.provided('coreg_ref_brain'):
            # Register the already brain-extracted image directly against
            # the brain-extracted reference
            return self.linear_coreg_pipeline(
                name='linear_coreg_brain',
                input_map={'preproc': 'brain',
                           'coreg_ref': 'coreg_ref_brain'},
                output_map={'coreg': 'coreg_brain'},
                name_maps=name_maps)
        if self.provided('coreg_ref'):
            # Coregister with skull included, then brain-extract the result
            return self.brain_extraction_pipeline(
                name='linear_coreg_brain',
                input_map={'preproc': 'coreg'},
                output_map={'brain': 'coreg_brain'},
                name_maps=name_maps)
        raise ArcanaUsageError(
            "Either 'coreg_ref' or 'coreg_ref_brain' needs to be provided "
            "in order to derive coreg_brain")

    def coreg_matrix_pipeline(self, **name_maps):
        """
        Derives the coregistration matrix via whichever coregistration
        pipeline matches the reference input that was supplied.

        Raises
        ------
        ArcanaUsageError
            If neither 'coreg_ref' nor 'coreg_ref_brain' is provided
        """
        if self.provided('coreg_ref_brain'):
            return self.coreg_brain_pipeline(**name_maps)
        if self.provided('coreg_ref'):
            return self.linear_coreg_pipeline(**name_maps)
        raise ArcanaUsageError(
            "'coreg_matrix' can only be derived if 'coreg_ref' or "
            "'coreg_ref_brain' is provided to {}".format(self))

    def coregister_to_atlas_pipeline(self, **name_maps):
        """
        Dispatches atlas registration to the tool selected by the
        'atlas_coreg_tool' switch ('fnirt' or 'ants').
        """
        if self.branch('atlas_coreg_tool', 'fnirt'):
            atlas_reg = self._fnirt_to_atlas_pipeline(**name_maps)
        elif self.branch('atlas_coreg_tool', 'ants'):
            atlas_reg = self._ants_to_atlas_pipeline(**name_maps)
        else:
            self.unhandled_branch('atlas_coreg_tool')
        return atlas_reg

    def _flirt_linear_coreg_pipeline(self, **name_maps):
        """
        Registers a MR scan to a reference MR scan using FSL's FLIRT
        command.
        """
        pipeline = self.new_pipeline(
            name='linear_reg',
            name_maps=name_maps,
            desc="Registers a MR scan against a reference image using FLIRT",
            references=[fsl_cite])

        # NB: the same study parameter drives both FLIRT's 'cost' and
        # 'cost_func' (search cost) options
        cost = self.parameter('flirt_cost_func')
        pipeline.add(
            'flirt',
            FLIRT(dof=self.parameter('flirt_degrees_of_freedom'),
                  cost=cost,
                  cost_func=cost,
                  output_type='NIFTI_GZ'),
            inputs={'in_file': ('preproc', nifti_gz_format),
                    'reference': ('coreg_ref', nifti_gz_format)},
            outputs={'out_file': ('coreg', nifti_gz_format),
                     'out_matrix_file': ('coreg_matrix',
                                         text_matrix_format)},
            wall_time=5,
            requirements=[fsl_req.v('5.0.8')])

        return pipeline

    def _ants_linear_coreg_pipeline(self, **name_maps):
        """
        Registers a MR scan to a reference MR scan using ANTs' linear
        registration (AntsRegSyn with transformation='r').
        """
        pipeline = self.new_pipeline(
            name='linear_coreg',
            name_maps=name_maps,
            desc="Registers a MR scan against a reference image using ANTs")

        pipeline.add(
            'ANTs_linear_Reg',
            AntsRegSyn(num_dimensions=3,
                       transformation='r',
                       out_prefix='reg2hires'),
            inputs={'input_file': ('preproc', nifti_gz_format),
                    'ref_file': ('coreg_ref', nifti_gz_format)},
            outputs={'reg_file': ('coreg', nifti_gz_format),
                     'regmat': ('coreg_matrix', text_matrix_format)},
            wall_time=10,
            requirements=[ants_req.v('2.0')])

        return pipeline

    def _spm_linear_coreg_pipeline(self, **name_maps):  # @UnusedVariable
        """
        Coregisters T2 image to T1 image using SPM's "Register" method.

        NB: Default values come from the W2MHS toolbox
        """
        pipeline = self.new_pipeline(
            'linear_coreg',
            name_maps=name_maps,
            desc="Coregister T2-weighted images to T1",
            references=[spm_cite])

        # Per-parameter estimation tolerances (W2MHS toolbox defaults)
        tolerances = [0.02, 0.02, 0.02, 0.001, 0.001, 0.001,
                      0.01, 0.01, 0.01, 0.001, 0.001, 0.001]
        pipeline.add(
            'coreg',
            Coregister(jobtype='estwrite',
                       cost_function='nmi',
                       separation=[4, 2],
                       tolerance=tolerances,
                       fwhm=[7, 7],
                       write_interp=4,
                       write_wrap=[0, 0, 0],
                       write_mask=False,
                       out_prefix='r'),
            inputs={'target': ('coreg_ref', nifti_format),
                    'source': ('preproc', nifti_format)},
            outputs={'coregistered_source': ('coreg', nifti_format)},
            wall_time=30,
            requirements=[spm_req.v(12)])
        return pipeline

    def qform_transform_pipeline(self, **name_maps):
        """
        Transforms the brain image into the reference-brain space using
        the qform headers (FLIRT with uses_qform), producing both the
        transformed image and the corresponding matrix.
        """
        pipeline = self.new_pipeline(
            name='qform_transform',
            name_maps=name_maps,
            desc="Registers a MR scan against a reference image",
            references=[fsl_cite])

        pipeline.add(
            'flirt',
            FLIRT(uses_qform=True, apply_xfm=True),
            inputs={'in_file': ('brain', nifti_gz_format),
                    'reference': ('coreg_ref_brain', nifti_gz_format)},
            outputs={'out_file': ('qformed', nifti_gz_format),
                     'out_matrix_file': ('qform_mat', text_matrix_format)},
            wall_time=5,
            requirements=[fsl_req.v('5.0.8')])

        return pipeline

    def _bet_brain_extraction_pipeline(self, **name_maps):
        """
        Generates a whole brain mask using FSL's BET command.
        """
        pipeline = self.new_pipeline(
            name='brain_extraction',
            name_maps=name_maps,
            desc="Generate brain mask from mr_scan",
            references=[fsl_cite, bet_cite, bet2_cite])

        # BET node producing both the extracted brain and its mask
        brain_extract = pipeline.add(
            "bet",
            fsl.BET(mask=True,
                    output_type='NIFTI_GZ',
                    frac=self.parameter('bet_f_threshold'),
                    vertical_gradient=self.parameter('bet_g_threshold')),
            inputs={'in_file': ('preproc', nifti_gz_format)},
            outputs={'out_file': ('brain', nifti_gz_format),
                     'mask_file': ('brain_mask', nifti_gz_format)},
            requirements=[fsl_req.v('5.0.9')])
        # Configure either robust centre estimation or bias-field
        # reduction, depending on the 'bet_robust' switch
        if self.branch('bet_robust'):
            brain_extract.inputs.robust = True
        else:
            brain_extract.inputs.reduce_bias = self.parameter(
                'bet_reduce_bias')
        return pipeline

    def _optiBET_brain_extraction_pipeline(self, **name_maps):
        """
        Generates a whole brain mask using a modified optiBET approach.

        The subject image is nonlinearly registered to the atlas, the
        resulting transforms are used to bring the atlas mask back into
        native space, and the binarised mask is applied to produce the
        brain-extracted image. Optionally generates a QC slice report.
        """
        pipeline = self.new_pipeline(
            name='brain_extraction',
            name_maps=name_maps,
            desc=("Modified implementation of optiBET.sh"),
            references=[fsl_cite])

        # Nonlinear (transformation='s') registration of the preprocessed
        # image to the atlas; outputs the affine matrix and inverse warp
        # used below
        mni_reg = pipeline.add('T1_reg',
                               AntsRegSyn(num_dimensions=3,
                                          transformation='s',
                                          out_prefix='T12MNI',
                                          num_threads=4),
                               inputs={
                                   'ref_file': ('atlas', nifti_gz_format),
                                   'input_file': ('preproc', nifti_gz_format)
                               },
                               wall_time=25,
                               requirements=[ants_req.v('2.0')])

        # Stack the inverse warp and the affine into a single transform list
        # for ApplyTransforms (atlas -> native direction)
        merge_trans = pipeline.add('merge_transforms',
                                   Merge(2),
                                   connect={
                                       'in1': (mni_reg, 'inv_warp'),
                                       'in2': (mni_reg, 'regmat')
                                   },
                                   wall_time=1)

        # Per-transform inversion flags matching the list above: the warp
        # (in1) is used as-is, the affine matrix (in2) is inverted
        trans_flags = pipeline.add('trans_flags',
                                   Merge(2, in1=False, in2=True),
                                   wall_time=1)

        # Resample the atlas mask into native space with nearest-neighbour
        # interpolation (preserves the binary labels)
        apply_trans = pipeline.add(
            'ApplyTransform',
            ApplyTransforms(interpolation='NearestNeighbor',
                            input_image_type=3),
            inputs={
                'input_image': ('atlas_mask', nifti_gz_format),
                'reference_image': ('preproc', nifti_gz_format)
            },
            connect={
                'transforms': (merge_trans, 'out'),
                'invert_transform_flags': (trans_flags, 'out')
            },
            wall_time=7,
            mem_gb=24,
            requirements=[ants_req.v('2.0')])

        # Binarise the warped mask -> 'brain_mask' output
        maths1 = pipeline.add(
            'binarize',
            fsl.ImageMaths(suffix='_optiBET_brain_mask', op_string='-bin'),
            connect={'in_file': (apply_trans, 'output_image')},
            outputs={'out_file': ('brain_mask', nifti_gz_format)},
            wall_time=5,
            requirements=[fsl_req.v('5.0.8')])

        # Apply the mask to the preprocessed image -> 'brain' output
        maths2 = pipeline.add('mask',
                              fsl.ImageMaths(suffix='_optiBET_brain',
                                             op_string='-mas'),
                              inputs={'in_file': ('preproc', nifti_gz_format)},
                              connect={'in_file2': (maths1, 'out_file')},
                              outputs={'out_file': ('brain', nifti_gz_format)},
                              wall_time=5,
                              requirements=[fsl_req.v('5.0.8')])

        # Optional QC report overlaying the extracted brain on the original
        if self.branch('optibet_gen_report'):
            pipeline.add('slices',
                         FSLSlices(outname='optiBET_report'),
                         wall_time=5,
                         inputs={'im1': ('preproc', nifti_gz_format)},
                         connect={'im2': (maths2, 'out_file')},
                         outputs={'report': ('optiBET_report', gif_format)},
                         requirements=[fsl_req.v('5.0.8')])

        return pipeline

    def _fnirt_to_atlas_pipeline(self, **name_maps):
        """
        Registers a MR scan to a reference MR scan using FSL's nonlinear
        FNIRT command

        Parameters
        ----------
        atlas : Which atlas to use, can be one of 'mni_nl6'
        """
        pipeline = self.new_pipeline(
            name='coregister_to_atlas',
            name_maps=name_maps,
            desc=("Nonlinearly registers a MR scan to a standard space,"
                  "e.g. MNI-space"),
            references=[fsl_cite])

        # Basic reorientation to standard MNI space
        # FIXME: Don't think is necessary any more since preproc should be
        #        in standard orientation
        reorient = pipeline.add(
            'reorient',
            Reorient2Std(output_type='NIFTI_GZ'),
            inputs={'in_file': ('preproc', nifti_gz_format)},
            requirements=[fsl_req.v('5.0.8')])

        reorient_mask = pipeline.add(
            'reorient_mask',
            Reorient2Std(output_type='NIFTI_GZ'),
            inputs={'in_file': ('brain_mask', nifti_gz_format)},
            requirements=[fsl_req.v('5.0.8')])

        # FIX: was `pipeline.create_node`, inconsistent with every other
        # node addition in this class ('inputs'/'requirements' are
        # arguments of 'add')
        reorient_brain = pipeline.add(
            'reorient_brain',
            Reorient2Std(output_type='NIFTI_GZ'),
            inputs={'in_file': ('brain', nifti_gz_format)},
            requirements=[fsl_req.v('5.0.8')])

        # Affine transformation to MNI space
        flirt = pipeline.add(
            'flirt',
            interface=FLIRT(dof=12, output_type='NIFTI_GZ'),
            inputs={'reference': ('atlas_brain', nifti_gz_format)},
            connect={'in_file': (reorient_brain, 'out_file')},
            requirements=[fsl_req.v('5.0.8')],
            wall_time=5)

        # Apply mask if corresponding subsampling scheme is 1
        # (i.e. 1-to-1 resolution) otherwise don't.
        apply_mask = [int(s == 1) for s in self.parameter('fnirt_subsampling')]
        # Nonlinear transformation to MNI space, initialised with the
        # affine from FLIRT above
        pipeline.add(
            'fnirt',
            interface=FNIRT(
                output_type='NIFTI_GZ',
                intensity_mapping_model=(
                    self.parameter('fnirt_intensity_model')
                    if self.parameter('fnirt_intensity_model') is not None else
                    'none'),
                subsampling_scheme=self.parameter('fnirt_subsampling'),
                fieldcoeff_file=True,
                in_fwhm=[
                    8, 6, 5, 4, 3, 2
                ],  # [8, 6, 5, 4.5, 3, 2] This threw an error because of float value @IgnorePep8,
                ref_fwhm=[8, 6, 5, 4, 2, 0],
                regularization_lambda=[300, 150, 100, 50, 40, 30],
                apply_intensity_mapping=[1, 1, 1, 1, 1, 0],
                max_nonlin_iter=[5, 5, 5, 5, 5, 10],
                apply_inmask=apply_mask,
                apply_refmask=apply_mask),
            inputs={
                'ref_file': ('atlas', nifti_gz_format),
                'refmask': ('atlas_mask', nifti_gz_format)
            },
            connect={
                'in_file': (reorient, 'out_file'),
                'inmask_file': (reorient_mask, 'out_file'),
                'affine_file': (flirt, 'out_matrix_file')
            },
            outputs={
                'warped_file': ('coreg_to_atlas', nifti_gz_format),
                'fieldcoeff_file': ('coreg_to_atlas_coeff', nifti_gz_format)
            },
            requirements=[fsl_req.v('5.0.8')],
            wall_time=60)
        # Set registration parameters
        # TODO: Need to work out which parameters to use
        return pipeline

    def _ants_to_atlas_pipeline(self, **name_maps):
        """
        Nonlinearly registers the brain image to the atlas brain using
        ANTs (AntsRegSyn, transformation='s') and generates a slice
        report of the result.
        """
        pipeline = self.new_pipeline(
            name='coregister_to_atlas',
            name_maps=name_maps,
            desc=("Nonlinearly registers a MR scan to a standard space,"
                  "e.g. MNI-space"),
            references=[fsl_cite])

        # Nonlinear registration of the brain image to the atlas brain
        atlas_reg = pipeline.add(
            'Struct2MNI_reg',
            AntsRegSyn(num_dimensions=3,
                       transformation='s',
                       out_prefix='Struct2MNI',
                       num_threads=4),
            inputs={'input_file': (self.brain_spec_name, nifti_gz_format),
                    'ref_file': ('atlas_brain', nifti_gz_format)},
            outputs={'reg_file': ('coreg_to_atlas', nifti_gz_format),
                     'regmat': ('coreg_to_atlas_mat', text_matrix_format),
                     'warp_file': ('coreg_to_atlas_warp', nifti_gz_format)},
            wall_time=25,
            requirements=[ants_req.v('2.0')])

        # QC report overlaying the registered image on the atlas
        pipeline.add(
            'slices',
            FSLSlices(outname='coreg_to_atlas_report'),
            inputs={'im1': ('atlas', nifti_gz_format)},
            connect={'im2': (atlas_reg, 'reg_file')},
            outputs={'report': ('coreg_to_atlas_report', gif_format)},
            wall_time=1,
            requirements=[fsl_req.v('5.0.8')])

        return pipeline

    def segmentation_pipeline(self, img_type=2, **name_maps):
        """
        White-matter segmentation of the brain image using FSL FAST.

        Parameters
        ----------
        img_type : int
            FAST image type (1 or 2); selects which of FAST's
            tissue-class outputs is taken as white matter.

        Raises
        ------
        ArcanaUsageError
            If img_type is neither 1 nor 2
        """
        pipeline = self.new_pipeline(
            name='FAST_segmentation',
            name_maps=name_maps,
            inputs=[FilesetSpec('brain', nifti_gz_format)],
            outputs=[FilesetSpec('wm_seg', nifti_gz_format)],
            desc="White matter segmentation of the reference image",
            references=[fsl_cite])

        # FIX: removed trailing comma after pipeline.add(...), which bound
        # 'fast' to a 1-tuple instead of the node and broke the
        # (fast, 'tissue_class_files') connection below
        fast = pipeline.add('fast',
                            fsl.FAST(img_type=img_type,
                                     segments=True,
                                     out_basename='Reference_segmentation'),
                            inputs={'in_files': ('brain', nifti_gz_format)},
                            requirements=[fsl_req.v('5.0.9')])

        # Determine output field of split to use
        if img_type == 1:
            split_output = 'out3'
        elif img_type == 2:
            split_output = 'out2'
        else:
            raise ArcanaUsageError(
                "'img_type' parameter can either be 1 or 2 (not {})".format(
                    img_type))

        pipeline.add('split',
                     Split(splits=[1, 1, 1], squeeze=True),
                     connect={'inlist': (fast, 'tissue_class_files')},
                     outputs={split_output: ('wm_seg', nifti_gz_format)})

        return pipeline

    def preprocess_pipeline(self, **name_maps):
        """
        Performs basic preprocessing, such as swapping dimensions into
        standard orientation and resampling (if required)

        Parameters
        -------
        new_dims : tuple(str)[3]
            A 3-tuple with the new orientation of the image (see FSL
            swap dim)
        resolution : list(float)[3] | None
            New resolution of the image. If None no resampling is
            performed
        """
        pipeline = self.new_pipeline(
            name='preprocess_pipeline',
            name_maps=name_maps,
            desc=("Dimensions swapping to ensure that all the images "
                  "have the same orientations."),
            references=[fsl_cite])

        reorient = self.branch('reorient_to_std')
        resolution = self.parameter('preproc_resolution')

        if reorient or resolution is not None:
            swap = None
            if reorient:
                swap = pipeline.add(
                    'fslreorient2std',
                    fsl.utils.Reorient2Std(),
                    inputs={'in_file': ('magnitude', nifti_gz_format)},
                    requirements=[fsl_req.v('5.0.9')])
            if resolution is not None:
                if swap is not None:
                    # Resample the reoriented image
                    resample = pipeline.add(
                        "resample",
                        MRResize(voxel=resolution),
                        connect={'in_file': (swap, 'out_file')},
                        requirements=[mrtrix_req.v('3.0rc3')])
                else:
                    # FIX: previously referenced 'swap' here even when
                    # 'reorient_to_std' was disabled (UnboundLocalError);
                    # resample directly from the magnitude input instead
                    resample = pipeline.add(
                        "resample",
                        MRResize(voxel=resolution),
                        inputs={'in_file': ('magnitude', nifti_gz_format)},
                        requirements=[mrtrix_req.v('3.0rc3')])
                pipeline.connect_output('preproc', resample, 'out_file',
                                        nifti_gz_format)
            else:
                pipeline.connect_output('preproc', swap, 'out_file',
                                        nifti_gz_format)
        else:
            # Don't actually do any processing just copy magnitude image to
            # preproc
            pipeline.add('identity',
                         IdentityInterface(['file']),
                         inputs={'file': ('magnitude', nifti_gz_format)},
                         outputs={'file': ('preproc', nifti_gz_format)})

        return pipeline

    def header_extraction_pipeline(self, **name_maps):
        """
        Extracts scan metadata (TR, timings, phase-encode info, field
        strength/orientation, etc.) from the DICOM header of the
        'header_image' input if provided, otherwise of 'magnitude'.

        Raises
        ------
        ArcanaUsageError
            If the selected input fileset is not in DICOM format
        """
        if self.provided('header_image'):
            dcm_in_name = 'header_image'
        else:
            dcm_in_name = 'magnitude'
        if self.input(dcm_in_name).format != dicom_format:
            # FIX: report the fileset actually checked — the message
            # previously always named 'magnitude' and printed its format,
            # which was misleading when 'header_image' was at fault
            raise ArcanaUsageError(
                "Can only extract header info if '{}' fileset "
                "is provided in DICOM format ({})".format(
                    dcm_in_name, self.input(dcm_in_name).format))

        pipeline = self.new_pipeline(
            name='header_extraction',
            name_maps=name_maps,
            desc=("Pipeline to extract the most important scan "
                  "information from the image header"),
            references=[])

        pipeline.add('hd_info_extraction',
                     DicomHeaderInfoExtraction(multivol=False),
                     inputs={'dicom_folder': (dcm_in_name, dicom_format)},
                     outputs={
                         'tr': ('tr', float),
                         'start_time': ('start_time', str),
                         'total_duration': ('total_duration', str),
                         'real_duration': ('real_duration', str),
                         'ped': ('ped', str),
                         'pe_angle': ('pe_angle', str),
                         'dcm_info': ('dcm_info', text_format),
                         'echo_times': ('echo_times', float),
                         'voxel_sizes': ('voxel_sizes', float),
                         'B0': ('main_field_strength', float),
                         'H': ('main_field_orient', float)
                     })

        return pipeline

    def motion_mat_pipeline(self, **name_maps):
        """
        Calculates motion matrices from the coregistration and qform
        matrices (plus per-volume alignment matrices when the spec
        exists). If 'coreg_matrix' is not derivable, the study is treated
        as the motion-correction reference.
        """
        pipeline = self.new_pipeline(name='motion_mat_calculation',
                                     name_maps=name_maps,
                                     desc=("Motion matrices calculation"),
                                     references=[fsl_cite])

        motion_mats = pipeline.add(
            'motion_mats',
            MotionMatCalculation(),
            outputs={'motion_mats': ('motion_mats', motion_mats_format)})
        if self.spec('coreg_matrix').derivable:
            pipeline.connect_input('coreg_matrix', motion_mats, 'reg_mat',
                                   text_matrix_format)
            pipeline.connect_input('qform_mat', motion_mats, 'qform_mat',
                                   text_matrix_format)
            if 'align_mats' in self.data_spec_names():
                pipeline.connect_input('align_mats', motion_mats,
                                       'align_mats', directory_format)
        else:
            logger.info("Cannot derive 'coreg_matrix' for {} required for "
                        "motion matrix calculation, assuming that it "
                        "is the reference study".format(self))
            motion_mats.inputs.reference = True
            pipeline.connect_input('magnitude', motion_mats, 'dummy_input')
        return pipeline
Example #11
0
class EpiSeriesStudy(MriStudy, metaclass=StudyMetaClass):

    # Data specs added to those inherited from MriStudy: acquired (input)
    # filesets plus derived filesets/fields and the pipelines that
    # generate them
    add_data_specs = [
        InputFilesetSpec('series',
                         STD_IMAGE_FORMATS,
                         desc=("The set of EPI volumes that make up the "
                               "series")),
        # Optional white-matter segmentation of the coregistration
        # reference (passed to epi_reg when provided)
        InputFilesetSpec('coreg_ref_wmseg', STD_IMAGE_FORMATS, optional=True),
        # Opposite phase-encode acquisition, enables TOPUP correction
        InputFilesetSpec('reverse_phase', STD_IMAGE_FORMATS, optional=True),
        # Field-map magnitude/phase pair, enables FUGUE correction
        InputFilesetSpec('field_map_mag', STD_IMAGE_FORMATS, optional=True),
        InputFilesetSpec('field_map_phase', STD_IMAGE_FORMATS, optional=True),
        FilesetSpec('magnitude',
                    nifti_gz_format,
                    'extract_magnitude_pipeline',
                    desc=("The magnitude image, typically extracted from "
                          "the provided series")),
        FilesetSpec('series_preproc', nifti_gz_format, 'preprocess_pipeline'),
        FilesetSpec('series_coreg', nifti_gz_format, 'series_coreg_pipeline'),
        # Motion-corrected series plus per-volume transforms and motion
        # parameters from MCFLIRT
        FilesetSpec('moco', nifti_gz_format, 'intrascan_alignment_pipeline'),
        FilesetSpec('align_mats', motion_mats_format,
                    'intrascan_alignment_pipeline'),
        FilesetSpec('moco_par', par_format, 'intrascan_alignment_pipeline'),
        FieldSpec('field_map_delta_te', float, 'field_map_time_info_pipeline')
    ]

    add_param_specs = [
        SwitchSpec('bet_robust', True),
        # Adds 'epireg' to the inherited coregistration choices, falling
        # back to 'flirt' where 'epireg' is unavailable
        MriStudy.param_spec('coreg_method').with_new_choices(
            'epireg', fallbacks={'epireg': 'flirt'}),
        ParamSpec('bet_f_threshold', 0.2),
        ParamSpec('bet_reduce_bias', False),
        ParamSpec('fugue_echo_spacing', 0.000275)
    ]

    @property
    def header_image_spec_name(self):
        """Name of the spec to read DICOM header info from: the explicit
        'header_image' input when provided, otherwise the series itself."""
        return 'header_image' if self.provided('header_image') else 'series'

    @property
    def series_preproc_spec_name(self):
        """Preprocessed-series spec to use downstream: the coregistered
        series when coregistration is configured, otherwise the plain
        preprocessed series."""
        return 'series_coreg' if self.is_coregistered else 'series_preproc'

    def coreg_pipeline(self, **name_maps):
        """Use the epi_reg implementation when 'coreg_method' is 'epireg',
        otherwise fall back to the base-class coregistration."""
        if self.branch('coreg_method', 'epireg'):
            return self._epireg_linear_coreg_pipeline(**name_maps)
        return super().coreg_pipeline(**name_maps)

    def _epireg_linear_coreg_pipeline(self, **name_maps):
        """
        Coregisters the EPI brain to the structural reference using FSL's
        epi_reg, which refines the registration with white-matter
        boundaries.
        """
        pipeline = self.new_pipeline(
            name='linear_coreg',
            desc=("Intra-subjects epi registration improved using white "
                  "matter boundaries."),
            citations=[fsl_cite],
            name_maps=name_maps)

        epireg = pipeline.add(
            'epireg',
            fsl.epi.EpiReg(out_base='epireg2ref', output_type='NIFTI_GZ'),
            inputs={'epi': ('brain', nifti_gz_format),
                    't1_brain': ('coreg_ref_brain', nifti_gz_format),
                    't1_head': ('coreg_ref', nifti_gz_format)},
            outputs={'brain_coreg': ('out_file', nifti_gz_format),
                     'coreg_fsl_mat': ('epi2str_mat', text_matrix_format)},
            requirements=[fsl_req.v('5.0.9')])

        # Feed in the provided white-matter segmentation when available
        if self.provided('coreg_ref_wmseg'):
            pipeline.connect_input('coreg_ref_wmseg', epireg, 'wmseg',
                                   nifti_gz_format)

        return pipeline

    def brain_coreg_pipeline(self, **name_maps):
        """
        Coregisters the brain-extracted image to the brain-extracted
        reference. With the 'epireg' method this reuses coreg_pipeline
        (remapping its inputs/outputs to the brain specs) and additionally
        transforms the brain mask into the reference space; otherwise it
        defers to the base-class implementation.
        """
        if self.branch('coreg_method', 'epireg'):
            # Re-run the epi_reg coregistration with brain inputs/outputs
            # substituted via the name-map mechanism
            pipeline = self.coreg_pipeline(
                name='brain_coreg',
                name_maps=dict(input_map={
                    'mag_preproc': 'brain',
                    'coreg_ref': 'coreg_ref_brain'
                },
                               output_map={'mag_coreg': 'brain_coreg'},
                               name_maps=name_maps))

            # Resample the brain mask into the reference space, reusing
            # the epi->structural matrix produced by the 'epireg' node
            # looked up from the pipeline above
            pipeline.add(
                'mask_transform',
                fsl.ApplyXFM(output_type='NIFTI_GZ', apply_xfm=True),
                inputs={
                    'in_matrix_file': (pipeline.node('epireg'), 'epi2str_mat'),
                    'in_file': ('brain_mask', nifti_gz_format),
                    'reference': ('coreg_ref_brain', nifti_gz_format)
                },
                outputs={'brain_mask_coreg': ('out_file', nifti_gz_format)},
                requirements=[fsl_req.v('5.0.10')],
                wall_time=10)
        else:
            # NOTE(review): delegates to coreg_brain_pipeline despite this
            # method being named brain_coreg_pipeline — looks intentional
            # (naming differs between base and subclass) but worth
            # confirming against the base class
            pipeline = super().coreg_brain_pipeline(**name_maps)

        return pipeline

    def extract_magnitude_pipeline(self, **name_maps):
        """
        Extracts the first volume of the EPI series as the magnitude
        image.
        """
        pipeline = self.new_pipeline(
            'extract_magnitude',
            desc="Extracts a single magnitude volume from a series",
            citations=[],
            name_maps=name_maps)

        # coord=(3, 0) selects index 0 along the 4th (volume) axis
        pipeline.add(
            "extract_first_vol",
            MRConvert(coord=(3, 0)),
            inputs={'in_file': ('series', nifti_gz_format)},
            outputs={'magnitude': ('out_file', nifti_gz_format)},
            requirements=[mrtrix_req.v('3.0rc3')])

        return pipeline

    def series_coreg_pipeline(self, **name_maps):
        """
        Applies the previously estimated coregistration transform to the
        preprocessed series.

        Raises
        ------
        BananaUsageError
            If neither 'coreg_ref' nor 'coreg_ref_brain' was provided
        """
        pipeline = self.new_pipeline(
            'series_coreg',
            desc="Applies coregistration transform to DW series",
            citations=[],
            name_maps=name_maps)

        # Pick whichever reference image was supplied
        if self.provided('coreg_ref'):
            ref_spec = 'coreg_ref'
        elif self.provided('coreg_ref_brain'):
            ref_spec = 'coreg_ref_brain'
        else:
            raise BananaUsageError(
                "Cannot coregister DW series as reference ('coreg_ref' or "
                "'coreg_ref_brain') has not been provided to {}".format(self))

        # Apply co-registration transformation to DW series
        pipeline.add(
            'mask_transform',
            fsl.ApplyXFM(output_type='NIFTI_GZ', apply_xfm=True),
            inputs={'in_matrix_file': ('coreg_fsl_mat', text_matrix_format),
                    'in_file': ('series_preproc', nifti_gz_format),
                    'reference': (ref_spec, nifti_gz_format)},
            outputs={'series_coreg': ('out_file', nifti_gz_format)},
            wall_time=10,
            requirements=[fsl_req.v('5.0.10')])

        return pipeline

    def intrascan_alignment_pipeline(self, **name_maps):
        """
        Realigns the volumes of the series to the first volume with
        MCFLIRT, producing the motion-corrected series, motion parameters
        and per-volume transformation matrices.
        """
        pipeline = self.new_pipeline(name='MCFLIRT_pipeline',
                                     desc=("Intra-epi volumes alignment."),
                                     citations=[fsl_cite],
                                     name_maps=name_maps)

        mcflirt = pipeline.add(
            'mcflirt',
            fsl.MCFLIRT(ref_vol=0,
                        save_mats=True,
                        save_plots=True,
                        output_type='NIFTI_GZ',
                        out_file='moco.nii.gz'),
            inputs={'in_file': ('mag_preproc', nifti_gz_format)},
            outputs={'moco': ('out_file', nifti_gz_format),
                     'moco_par': ('par_file', par_format)},
            requirements=[fsl_req.v('5.0.9')])

        # Collect the per-volume .mat files into the align_mats directory
        pipeline.add(
            'merge',
            MergeListMotionMat(),
            inputs={'file_list': (mcflirt, 'mat_file')},
            outputs={'align_mats': ('out_dir', motion_mats_format)})

        return pipeline

    def field_map_time_info_pipeline(self, **name_maps):
        """
        Extracts the echo-time difference (delta TE) from the field-map
        magnitude image provided in DICOM format.
        """
        pipeline = self.new_pipeline(
            name='field_map_time_info_pipeline',
            desc=("Pipeline to extract delta TE from field map "
                  "images, if provided"),
            citations=[fsl_cite],
            name_maps=name_maps)

        pipeline.add(
            'extract_delta_te',
            FieldMapTimeInfo(),
            inputs={'fm_mag': ('field_map_mag', dicom_format)},
            outputs={'field_map_delta_te': ('delta_te', float)})

        return pipeline

    def preprocess_pipeline(self, **name_maps):
        """
        Selects the distortion-correction strategy: FUGUE when both
        field-map inputs were provided, TOPUP when a reverse phase-encode
        scan was provided, otherwise the base-class preprocessing.
        """
        provided_inputs = self.input_names
        if ('field_map_phase' in provided_inputs
                and 'field_map_mag' in provided_inputs):
            return self._fugue_pipeline(**name_maps)
        if 'reverse_phase' in provided_inputs:
            return self._topup_pipeline(**name_maps)
        return super().preprocess_pipeline(**name_maps)

    def _topup_pipeline(self, **name_maps):
        """Build the TOPUP distortion-correction pipeline.

        Reorients the main series and the reverse phase-encode scan,
        prepares the phase-encoding configuration, estimates the
        susceptibility field with FSL TOPUP and applies the correction
        to the series, producing 'series_preproc'.
        """
        pipeline = self.new_pipeline(
            name='preprocess_pipeline',
            desc=("Topup distortion correction pipeline"),
            citations=[fsl_cite],
            name_maps=name_maps)

        # Bring both acquisitions into standard orientation before any
        # phase-encode handling.
        reorient_epi_in = pipeline.add(
            'reorient_epi_in',
            fsl.utils.Reorient2Std(),
            inputs={'in_file': ('series', nifti_gz_format)},
            requirements=[fsl_req.v('5.0.9')])

        reorient_epi_opposite = pipeline.add(
            'reorient_epi_opposite',
            fsl.utils.Reorient2Std(),
            inputs={'in_file': ('reverse_phase', nifti_gz_format)},
            requirements=[fsl_req.v('5.0.9')])

        # Pair the two scans and work out the phase-encode direction from
        # the 'ped' / 'pe_angle' inputs.
        prep_dwi = pipeline.add('prepare_dwi',
                                PrepareDWI(topup=True),
                                inputs={
                                    'pe_dir': ('ped', str),
                                    'ped_polarity': ('pe_angle', str),
                                    'dwi': (reorient_epi_in, 'out_file'),
                                    'dwi1': (reorient_epi_opposite, 'out_file')
                                })

        # Generate the acquisition-parameter (encoding) files TOPUP needs.
        ped = pipeline.add('gen_config',
                           GenTopupConfigFiles(),
                           inputs={'ped': (prep_dwi, 'pe')})

        # Concatenate main + opposite-PE images along time for TOPUP.
        merge_outputs = pipeline.add('merge_files',
                                     merge_lists(2),
                                     inputs={
                                         'in1': (prep_dwi, 'main'),
                                         'in2': (prep_dwi, 'secondary')
                                     })

        merge = pipeline.add('fsl_merge',
                             fsl_merge(dimension='t', output_type='NIFTI_GZ'),
                             inputs={'in_files': (merge_outputs, 'out')},
                             requirements=[fsl_req.v('5.0.9')])

        # Estimate the off-resonance field from the merged pair.
        topup = pipeline.add('topup',
                             TOPUP(output_type='NIFTI_GZ'),
                             inputs={
                                 'in_file': (merge, 'merged_file'),
                                 'encoding_file': (ped, 'config_file')
                             },
                             requirements=[fsl_req.v('5.0.9')])

        # ApplyTOPUP expects a list input, so wrap the single series.
        in_apply_tp = pipeline.add(
            'in_apply_tp',
            merge_lists(1),
            inputs={'in1': (reorient_epi_in, 'out_file')})

        # Apply the estimated field to the main series (Jacobian method).
        pipeline.add(
            'applytopup',
            ApplyTOPUP(method='jac', in_index=[1], output_type='NIFTI_GZ'),
            inputs={
                'in_files': (in_apply_tp, 'out'),
                'encoding_file': (ped, 'apply_topup_config'),
                'in_topup_movpar': (topup, 'out_movpar'),
                'in_topup_fieldcoef': (topup, 'out_fieldcoef')
            },
            outputs={'series_preproc': ('out_corrected', nifti_gz_format)},
            requirements=[fsl_req.v('5.0.9')])

        return pipeline

    def _fugue_pipeline(self, **name_maps):
        """Build the FUGUE distortion-correction pipeline.

        Reorients the series and both field-map images, skull-strips the
        field-map magnitude, prepares a fieldmap from magnitude + phase,
        then unwarps the series with FSL FUGUE to give 'series_preproc'.
        """
        pipeline = self.new_pipeline(
            name='preprocess_pipeline',
            desc=("Fugue distortion correction pipeline"),
            citations=[fsl_cite],
            name_maps=name_maps)

        reorient_epi_in = pipeline.add(
            'reorient_epi_in',
            fsl.utils.Reorient2Std(output_type='NIFTI_GZ'),
            inputs={'in_file': ('series', nifti_gz_format)},
            requirements=[fsl_req.v('5.0.9')])

        fm_mag_reorient = pipeline.add(
            'reorient_fm_mag',
            fsl.utils.Reorient2Std(output_type='NIFTI_GZ'),
            inputs={'in_file': ('field_map_mag', nifti_gz_format)},
            requirements=[fsl_req.v('5.0.9')])

        fm_phase_reorient = pipeline.add(
            'reorient_fm_phase',
            fsl.utils.Reorient2Std(output_type='NIFTI_GZ'),
            inputs={'in_file': ('field_map_phase', nifti_gz_format)},
            requirements=[fsl_req.v('5.0.9')])

        # Brain-extract the magnitude image as required by PrepareFieldmap.
        bet = pipeline.add("bet",
                           BET(robust=True, output_type='NIFTI_GZ'),
                           inputs={'in_file': (fm_mag_reorient, 'out_file')},
                           wall_time=5,
                           requirements=[fsl_req.v('5.0.9')])

        # delta_TE is derived by field_map_time_info_pipeline rather than
        # hard-coded (see the commented-out 2.46 default).
        create_fmap = pipeline.add(
            "prepfmap",
            PrepareFieldmap(
                # delta_TE=2.46
            ),
            inputs={
                'delta_TE': ('field_map_delta_te', float),
                "in_magnitude": (bet, "out_file"),
                'in_phase': (fm_phase_reorient, 'out_file')
            },
            wall_time=5,
            requirements=[fsl_req.v('5.0.9')])

        # Unwarp along x using the study's echo-spacing parameter.
        pipeline.add(
            'fugue',
            FUGUE(unwarp_direction='x',
                  dwell_time=self.parameter('fugue_echo_spacing'),
                  unwarped_file='example_func.nii.gz',
                  output_type='NIFTI_GZ'),
            inputs={
                'fmap_in_file': (create_fmap, 'out_fieldmap'),
                'in_file': (reorient_epi_in, 'out_file')
            },
            outputs={'series_preproc': ('unwarped_file', nifti_gz_format)},
            wall_time=5,
            requirements=[fsl_req.v('5.0.9')])

        return pipeline

    def motion_mat_pipeline(self, **name_maps):
        """Build the motion-matrix calculation pipeline.

        Combines the coregistration matrix with the qform matrix; when no
        reverse phase-encode scan was provided, the per-volume alignment
        matrices are connected in as well.
        """
        pipeline = self.new_pipeline(
            name='motion_mat_calculation',
            desc=("Motion matrices calculation"),
            citations=[fsl_cite],
            name_maps=name_maps)

        motion_mat_node = pipeline.add(
            'motion_mats',
            MotionMatCalculation(),
            inputs={
                'reg_mat': ('coreg_fsl_mat', text_matrix_format),
                'qform_mat': ('qform_mat', text_matrix_format)},
            outputs={
                'motion_mats': ('motion_mats', motion_mats_format)})

        if 'reverse_phase' not in self.input_names:
            pipeline.connect_input('align_mats', motion_mat_node,
                                   'align_mats', motion_mats_format)

        return pipeline
Example #12
0
class PetStudy(Study, metaclass=StudyMetaClass):
    """Study class for PET data.

    Provides pipelines for ICA decomposition of registered volumes,
    normalization of PET images to a template with ANTs, preparation of
    reconstruction directories, extraction of PET timing info from
    list-mode headers, and unlisting of list-mode data into sinograms.

    Fix: the five ``pipeline.add`` calls below previously passed the
    misspelled keyword ``ouputs=`` instead of ``outputs=``, so their
    pipeline outputs were never connected; corrected throughout.
    """

    add_param_specs = [
        ParamSpec('ica_n_components', 2),
        ParamSpec('ica_type', 'spatial'),
        ParamSpec('norm_transformation', 's'),
        ParamSpec('norm_dim', 3),
        ParamSpec('norm_template',
                  os.path.join(template_path, 'PET_template.nii.gz')),
        ParamSpec('crop_xmin', 100),
        ParamSpec('crop_xsize', 130),
        ParamSpec('crop_ymin', 100),
        ParamSpec('crop_ysize', 130),
        ParamSpec('crop_zmin', 20),
        ParamSpec('crop_zsize', 100),
        ParamSpec('image_orientation_check', False)
    ]

    add_data_specs = [
        InputFilesetSpec('list_mode', list_mode_format),
        InputFilesetSpec('registered_volumes', nifti_gz_format),
        InputFilesetSpec('pet_image', nifti_gz_format),
        InputFilesetSpec('pet_data_dir', directory_format),
        InputFilesetSpec('pet_recon_dir', directory_format),
        FilesetSpec('pet_recon_dir_prepared', directory_format,
                    'pet_data_preparation_pipeline'),
        FilesetSpec('decomposed_file', nifti_gz_format, 'ICA_pipeline'),
        FilesetSpec('timeseries', nifti_gz_format, 'ICA_pipeline'),
        FilesetSpec('mixing_mat', text_format, 'ICA_pipeline'),
        FilesetSpec('registered_volume', nifti_gz_format,
                    'Image_normalization_pipeline'),
        FilesetSpec('warp_file', nifti_gz_format,
                    'Image_normalization_pipeline'),
        FilesetSpec('invwarp_file', nifti_gz_format,
                    'Image_normalization_pipeline'),
        FilesetSpec('affine_mat', text_matrix_format,
                    'Image_normalization_pipeline'),
        FieldSpec('pet_duration', int, 'pet_time_info_extraction_pipeline'),
        FieldSpec('pet_end_time', str, 'pet_time_info_extraction_pipeline'),
        FieldSpec('pet_start_time', str, 'pet_time_info_extraction_pipeline'),
        InputFieldSpec('time_offset', int),
        InputFieldSpec('temporal_length', float),
        InputFieldSpec('num_frames', int),
        FilesetSpec('ssrb_sinograms', directory_format,
                    'sinogram_unlisting_pipeline')
    ]

    def ICA_pipeline(self, **kwargs):
        """Decompose registered 4D volumes into independent components."""
        pipeline = self.new_pipeline(
            name='ICA',
            desc=('Decompose a 4D fileset into a set of independent '
                  'components using FastICA'),
            citations=[],
            **kwargs)

        pipeline.add(
            'ICA',
            FastICA(n_components=self.parameter('ica_n_components'),
                    ica_type=self.parameter('ica_type')),
            inputs={'volume': ('registered_volumes', nifti_gz_format)},
            outputs={
                'decomposed_file': ('ica_decomposition', nifti_gz_format),
                'timeseries': ('ica_timeseries', nifti_gz_format),
                'mixing_mat': ('mixing_mat', text_format)
            })

        return pipeline

    def Image_normalization_pipeline(self, **kwargs):
        """Register the PET image to the study's template using ANTs."""
        pipeline = self.new_pipeline(
            name='Image_registration',
            desc=('Image registration to a template using ANTs'),
            citations=[],
            **kwargs)

        pipeline.add('ANTs',
                     AntsRegSyn(
                         out_prefix='vol2template',
                         num_dimensions=self.parameter('norm_dim'),
                         num_threads=self.processor.num_processes,
                         transformation=self.parameter('norm_transformation'),
                         ref_file=self.parameter('norm_template')),
                     inputs={'input_file': ('pet_image', nifti_gz_format)},
                     outputs={
                         'registered_volume': ('reg_file', nifti_gz_format),
                         'warp_file': ('warp_file', nifti_gz_format),
                         'invwarp_file': ('inv_warp', nifti_gz_format),
                         'affine_mat': ('regmat', text_matrix_format)
                     })

        return pipeline

    def pet_data_preparation_pipeline(self, **kwargs):
        """Prepare a reconstructed-PET directory for motion correction."""
        pipeline = self.new_pipeline(
            name='pet_data_preparation',
            desc=("Given a folder with reconstructed PET data, this "
                  "pipeline will prepare the data for the motion "
                  "correction"),
            citations=[],
            **kwargs)

        pipeline.add('prepare_pet',
                     PreparePetDir(image_orientation_check=self.parameter(
                         'image_orientation_check')),
                     inputs={'pet_dir': ('pet_recon_dir', directory_format)},
                     outputs={
                         'pet_recon_dir_prepared':
                         ('pet_dir_prepared', directory_format)
                     },
                     requirements=[mrtrix_req.v('3.0rc3'),
                                   fsl_req.v('5.0.9')])

        return pipeline

    def pet_time_info_extraction_pipeline(self, **kwargs):
        """Extract PET start/end time and duration from list-mode headers."""
        pipeline = self.new_pipeline(
            name='pet_info_extraction',
            desc=("Extract PET time info from list-mode header."),
            citations=[],
            **kwargs)

        # NOTE(review): 'pet_end_time' is cast here as float, but the
        # FieldSpec above declares it as str (like 'pet_start_time') —
        # confirm which type PetTimeInfo actually emits.
        pipeline.add(
            'PET_time_info',
            PetTimeInfo(),
            inputs={'pet_data_dir': ('pet_data_dir', directory_format)},
            outputs={
                'pet_end_time': ('pet_end_time', float),
                'pet_start_time': ('pet_start_time', str),
                'pet_duration': ('pet_duration', int)
            })
        return pipeline

    def sinogram_unlisting_pipeline(self, **kwargs):
        """Unlist list-mode data into sinograms and SSRB-compress them.

        Raises:
            BananaUsageError: if 'list_mode' was not provided as an input.
        """
        pipeline = self.new_pipeline(
            name='prepare_sinogram',
            desc=('Unlist pet listmode data into several sinograms and '
                  'perform ssrb compression to prepare data for motion '
                  'detection using PCA pipeline.'),
            citations=[],
            **kwargs)

        if not self.provided('list_mode'):
            raise BananaUsageError(
                "'list_mode' was not provided as an input to the study "
                "so cannot perform sinogram unlisting")

        prepare_inputs = pipeline.add('prepare_inputs',
                                      PrepareUnlistingInputs(),
                                      inputs={
                                          'list_mode':
                                          ('list_mode', list_mode_format),
                                          'time_offset': ('time_offset', int),
                                          'num_frames': ('num_frames', int),
                                          'temporal_len':
                                          ('temporal_length', float)
                                      })

        # Map-node over each frame produced by the unlisting preparation.
        unlisting = pipeline.add(
            'unlisting',
            PETListModeUnlisting(),
            inputs={'list_inputs': (prepare_inputs, 'out')},
            iterfield=['list_inputs'])

        ssrb = pipeline.add(
            'ssrb',
            SSRB(),
            inputs={'unlisted_sinogram': (unlisting, 'pet_sinogram')},
            requirements=[stir_req.v('3.0')])

        # Join the per-frame SSRB outputs back into a single directory.
        pipeline.add(
            'merge_sinograms',
            MergeUnlistingOutputs(),
            inputs={'sinograms': (ssrb, 'ssrb_sinograms')},
            outputs={'ssrb_sinograms': ('sinogram_folder', directory_format)},
            joinsource='unlisting',
            joinfield=['sinograms'])

        return pipeline
Example #13
0
class EpiStudy(MriStudy, metaclass=StudyMetaClass):
    """EPI MRI study with optional field-map based distortion correction.

    When both 'field_map_mag' and 'field_map_phase' inputs are supplied,
    preprocessing unwarps the magnitude image with FSL FUGUE; otherwise
    it falls back to the standard MriStudy preprocessing.
    """

    add_data_specs = [
        InputFilesetSpec('coreg_ref_wmseg', STD_IMAGE_FORMATS,
                         optional=True),
        InputFilesetSpec('field_map_mag', STD_IMAGE_FORMATS,
                         optional=True),
        InputFilesetSpec('field_map_phase', STD_IMAGE_FORMATS,
                         optional=True),
        FieldSpec('field_map_delta_te', float,
                  'field_map_time_info_pipeline')]

    add_param_specs = [
        SwitchSpec('bet_robust', True),
        ParamSpec('bet_f_threshold', 0.2),
        ParamSpec('bet_reduce_bias', False),
        ParamSpec('fugue_echo_spacing', 0.000275)]

    def preprocess_pipeline(self, **name_maps):
        """Pick FUGUE correction when a full field map was provided."""
        have_field_map = ('field_map_phase' in self.input_names
                          and 'field_map_mag' in self.input_names)
        if have_field_map:
            return self._fugue_pipeline(**name_maps)
        return super().preprocess_pipeline(**name_maps)

    def _fugue_pipeline(self, **name_maps):
        """Build the FUGUE distortion-correction pipeline.

        Reorients magnitude and field-map images, skull-strips the
        field-map magnitude, prepares a fieldmap and unwarps the
        magnitude image into 'mag_preproc'.
        """
        pipeline = self.new_pipeline(
            name='preprocess_pipeline',
            desc=("Fugue distortion correction pipeline"),
            citations=[fsl_cite],
            name_maps=name_maps)

        reorient_epi_in = pipeline.add(
            'reorient_epi_in',
            fsl.utils.Reorient2Std(output_type='NIFTI_GZ'),
            inputs={'in_file': ('magnitude', nifti_gz_format)},
            requirements=[fsl_req.v('5.0.9')])

        fm_mag_reorient = pipeline.add(
            'reorient_fm_mag',
            fsl.utils.Reorient2Std(output_type='NIFTI_GZ'),
            inputs={'in_file': ('field_map_mag', nifti_gz_format)},
            requirements=[fsl_req.v('5.0.9')])

        fm_phase_reorient = pipeline.add(
            'reorient_fm_phase',
            fsl.utils.Reorient2Std(output_type='NIFTI_GZ'),
            inputs={'in_file': ('field_map_phase', nifti_gz_format)},
            requirements=[fsl_req.v('5.0.9')])

        # Skull-strip the magnitude image as required by PrepareFieldmap.
        bet = pipeline.add(
            "bet",
            BET(robust=True, output_type='NIFTI_GZ'),
            inputs={'in_file': (fm_mag_reorient, 'out_file')},
            wall_time=5,
            requirements=[fsl_req.v('5.0.9')])

        # delta_TE comes from field_map_time_info_pipeline (see the
        # commented-out hard-coded 2.46 default).
        create_fmap = pipeline.add(
            "prepfmap",
            PrepareFieldmap(
                # delta_TE=2.46
            ),
            inputs={
                'delta_TE': ('field_map_delta_te', float),
                "in_magnitude": (bet, "out_file"),
                'in_phase': (fm_phase_reorient, 'out_file')},
            wall_time=5,
            requirements=[fsl_req.v('5.0.9')])

        pipeline.add(
            'fugue',
            FUGUE(unwarp_direction='x',
                  dwell_time=self.parameter('fugue_echo_spacing'),
                  unwarped_file='example_func.nii.gz',
                  output_type='NIFTI_GZ'),
            inputs={
                'fmap_in_file': (create_fmap, 'out_fieldmap'),
                'in_file': (reorient_epi_in, 'out_file')},
            outputs={'mag_preproc': ('unwarped_file', nifti_gz_format)},
            wall_time=5,
            requirements=[fsl_req.v('5.0.9')])

        return pipeline

    def field_map_time_info_pipeline(self, **name_maps):
        """Derive 'field_map_delta_te' from the field-map magnitude DICOM."""
        pipeline = self.new_pipeline(
            name='field_map_time_info_pipeline',
            desc=("Pipeline to extract delta TE from field map "
                  "images, if provided"),
            citations=[fsl_cite],
            name_maps=name_maps)

        pipeline.add(
            'extract_delta_te',
            FieldMapTimeInfo(),
            inputs={'fm_mag': ('field_map_mag', dicom_format)},
            outputs={'field_map_delta_te': ('delta_te', float)})

        return pipeline
Example #14
0
class EpiSeriesStudy(EpiStudy, metaclass=StudyMetaClass):
    """EPI study operating on a multi-volume series.

    Extends EpiStudy by deriving the magnitude image from the series,
    remapping the inherited preprocessing onto series inputs/outputs,
    adding an 'epireg' coregistration option, series coregistration and
    intra-scan (MCFLIRT) alignment.
    """

    add_data_specs = [
        InputFilesetSpec('series', STD_IMAGE_FORMATS,
                         desc=("The set of EPI volumes that make up the "
                               "series")),
        InputFilesetSpec('coreg_ref_wmseg', STD_IMAGE_FORMATS,
                         optional=True),
        InputFilesetSpec('field_map_mag', STD_IMAGE_FORMATS,
                         optional=True),
        InputFilesetSpec('field_map_phase', STD_IMAGE_FORMATS,
                         optional=True),
        FilesetSpec('magnitude', nifti_gz_format, 'extract_magnitude_pipeline',
                    desc=("The magnitude image, typically extracted from "
                          "the provided series")),
        FilesetSpec('series_preproc', nifti_gz_format, 'preprocess_pipeline'),
        FilesetSpec('mag_preproc', nifti_gz_format, 'mag_preproc_pipeline'),
        FilesetSpec('series_coreg', nifti_gz_format, 'series_coreg_pipeline'),
        FilesetSpec('moco', nifti_gz_format,
                    'intrascan_alignment_pipeline'),
        FilesetSpec('align_mats', motion_mats_format,
                    'intrascan_alignment_pipeline'),
        FilesetSpec('moco_par', par_format,
                    'intrascan_alignment_pipeline'),
        FieldSpec('field_map_delta_te', float,
                  'field_map_time_info_pipeline')]

    # Adds 'epireg' as a coregistration choice, falling back to 'flirt'
    # when epireg's requirements aren't met.
    add_param_specs = [
        MriStudy.param_spec('coreg_method').with_new_choices(
            'epireg', fallbacks={'epireg': 'flirt'})]

    primary_scan_name = 'series'

    @property
    def series_preproc_spec_name(self):
        """Name of the preprocessed-series spec, coregistered if applicable."""
        if self.is_coregistered:
            preproc = 'series_coreg'
        else:
            preproc = 'series_preproc'
        return preproc

    def extract_magnitude_pipeline(self, **name_maps):
        """Extract a single magnitude volume from the series."""
        pipeline = self.new_pipeline(
            'extract_magnitude',
            desc="Extracts a single magnitude volume from a series",
            citations=[],
            name_maps=name_maps)

        # coord=(3, 0) selects the first volume along the 4th axis.
        pipeline.add(
            "extract_first_vol",
            MRConvert(
                coord=(3, 0)),
            inputs={
                'in_file': ('series', nifti_gz_format)},
            outputs={
                'magnitude': ('out_file', nifti_gz_format)},
            requirements=[mrtrix_req.v('3.0rc3')])

        return pipeline

    def preprocess_pipeline(self, **name_maps):
        """Run the inherited preprocessing on the series instead of the
        magnitude image (remapped via input/output maps)."""
        return super().preprocess_pipeline(
            input_map={'magnitude': 'series'},
            output_map={'mag_preproc': 'series_preproc'},
            name_maps=name_maps)

    def mag_preproc_pipeline(self, **name_maps):
        """Derive the preprocessed magnitude from the preprocessed series."""
        return self.extract_magnitude_pipeline(
            input_map={'series': 'series_preproc'},
            output_map={'magnitude': 'mag_preproc'},
            name_maps=name_maps)

    def coreg_pipeline(self, **name_maps):
        """Select epireg-based or inherited coregistration per the switch."""
        if self.branch('coreg_method', 'epireg'):
            pipeline = self._epireg_linear_coreg_pipeline(**name_maps)
        else:
            pipeline = super().coreg_pipeline(**name_maps)
        return pipeline

    def _epireg_linear_coreg_pipeline(self, **name_maps):
        """Coregister using FSL epi_reg (white-matter boundary based)."""
        pipeline = self.new_pipeline(
            name='linear_coreg',
            desc=("Intra-subjects epi registration improved using white "
                  "matter boundaries."),
            citations=[fsl_cite],
            name_maps=name_maps)

        epireg = pipeline.add(
            'epireg',
            fsl.epi.EpiReg(
                out_base='epireg2ref',
                output_type='NIFTI_GZ',
                no_clean=True),
            inputs={
                'epi': ('brain', nifti_gz_format),
                't1_brain': ('coreg_ref_brain', nifti_gz_format),
                't1_head': ('coreg_ref', nifti_gz_format)},
            outputs={
                'brain_coreg': ('out_file', nifti_gz_format),
                'coreg_fsl_mat': ('epi2str_mat', text_matrix_format)},
            requirements=[fsl_req.v('5.0.9')])

        # Supply a precomputed WM segmentation if available, saving
        # epi_reg from computing its own.
        if self.provided('coreg_ref_wmseg'):
            pipeline.connect_input('coreg_ref_wmseg', epireg, 'wmseg',
                                   nifti_gz_format)

        return pipeline

    def brain_coreg_pipeline(self, **name_maps):
        """Coregister the brain image, transforming the brain mask too
        when the epireg method is selected."""
        if self.branch('coreg_method', 'epireg'):
            # Reuse coreg_pipeline with brain-specific input/output maps.
            pipeline = self.coreg_pipeline(
                name='brain_coreg',
                name_maps=dict(
                    input_map={
                        'mag_preproc': 'brain',
                        'coreg_ref': 'coreg_ref_brain'},
                    output_map={
                        'mag_coreg': 'brain_coreg'},
                    name_maps=name_maps))

            # Apply the epireg transform to the brain mask as well.
            pipeline.add(
                'mask_transform',
                fsl.ApplyXFM(
                    output_type='NIFTI_GZ',
                    apply_xfm=True),
                inputs={
                    'in_matrix_file': (pipeline.node('epireg'), 'epi2str_mat'),
                    'in_file': ('brain_mask', nifti_gz_format),
                    'reference': ('coreg_ref_brain', nifti_gz_format)},
                outputs={
                    'brain_mask_coreg': ('out_file', nifti_gz_format)},
                requirements=[fsl_req.v('5.0.10')],
                wall_time=10)
        else:
            pipeline = super().brain_coreg_pipeline(**name_maps)

        return pipeline

    def series_coreg_pipeline(self, **name_maps):
        """Apply the coregistration transform to the whole series.

        Raises:
            BananaUsageError: if neither 'coreg_ref' nor 'coreg_ref_brain'
                was provided.
        """
        pipeline = self.new_pipeline(
            'series_coreg',
            desc="Applies coregistration transform to DW series",
            citations=[],
            name_maps=name_maps)

        if self.provided('coreg_ref'):
            coreg_ref = 'coreg_ref'
        elif self.provided('coreg_ref_brain'):
            coreg_ref = 'coreg_ref_brain'
        else:
            raise BananaUsageError(
                "Cannot coregister DW series as reference ('coreg_ref' or "
                "'coreg_ref_brain') has not been provided to {}".format(self))

        # Apply co-registration transformation to DW series
        pipeline.add(
            'mask_transform',
            fsl.ApplyXFM(
                output_type='NIFTI_GZ',
                apply_xfm=True),
            inputs={
                'in_matrix_file': ('coreg_fsl_mat', text_matrix_format),
                'in_file': ('series_preproc', nifti_gz_format),
                'reference': (coreg_ref, nifti_gz_format)},
            outputs={
                'series_coreg': ('out_file', nifti_gz_format)},
            requirements=[fsl_req.v('5.0.10')],
            wall_time=10)

        return pipeline

    def intrascan_alignment_pipeline(self, **name_maps):
        """Align volumes within the scan using FSL MCFLIRT."""
        pipeline = self.new_pipeline(
            name='MCFLIRT_pipeline',
            desc=("Intra-epi volumes alignment."),
            citations=[fsl_cite],
            name_maps=name_maps)

        mcflirt = pipeline.add(
            'mcflirt',
            fsl.MCFLIRT(
                ref_vol=0,
                save_mats=True,
                save_plots=True,
                output_type='NIFTI_GZ',
                out_file='moco.nii.gz'),
            inputs={
                'in_file': ('mag_preproc', nifti_gz_format)},
            outputs={
                'moco': ('out_file', nifti_gz_format),
                'moco_par': ('par_file', par_format)},
            requirements=[fsl_req.v('5.0.9')])

        # Collect MCFLIRT's per-volume matrices into the align_mats output.
        pipeline.add(
            'merge',
            MergeListMotionMat(),
            inputs={
                'file_list': (mcflirt, 'mat_file')},
            outputs={
                'align_mats': ('out_dir', motion_mats_format)})

        return pipeline

    def motion_mat_pipeline(self, **name_maps):
        """Calculate motion matrices from coregistration and qform mats."""
        pipeline = self.new_pipeline(
            name='motion_mat_calculation',
            desc=("Motion matrices calculation"),
            citations=[fsl_cite],
            name_maps=name_maps)

        mm = pipeline.add(
            'motion_mats',
            MotionMatCalculation(),
            inputs={
                'reg_mat': ('coreg_fsl_mat', text_matrix_format),
                'qform_mat': ('qform_mat', text_matrix_format)},
            outputs={
                'motion_mats': ('motion_mats', motion_mats_format)})
        # Without a reverse-PE scan, intra-scan alignment matrices are
        # available and are fed into the calculation.
        if 'reverse_phase' not in self.input_names:
            pipeline.connect_input('align_mats', mm, 'align_mats',
                                   motion_mats_format)

        return pipeline
Example #15
0
class TestDialationAnalysis(Analysis, metaclass=AnalysisMetaClass):
    """Test analysis exercising derivative fields at every frequency.

    pipeline1 derives a per-session field; pipeline2/3 join it over
    visits/subjects respectively; pipeline4 joins over both to give a
    per-dataset value; pipeline5 sums the three joined results.

    Fix: pipeline5 previously cast its output as ``('z', float)`` while
    the matching spec declares ``FieldSpec('derived_field5', int, ...)``;
    the cast is now ``int`` for consistency with the spec.
    """

    add_data_specs = [
        InputFieldSpec('acquired_field1', int),
        InputFieldSpec('acquired_field2', int, optional=True),
        FieldSpec('derived_field1', int, 'pipeline1'),
        FieldSpec('derived_field2', int, 'pipeline2',
                  frequency='per_subject'),
        FieldSpec('derived_field3', int, 'pipeline3',
                  frequency='per_visit'),
        FieldSpec('derived_field4', int, 'pipeline4',
                  frequency='per_dataset'),
        FieldSpec('derived_field5', int, 'pipeline5')]

    add_param_specs = [
        ParamSpec('increment', 1),
        ParamSpec('pipeline3_op', 'add')]

    def pipeline1(self, **name_maps):
        """Add 'acquired_field2' (or the 'increment' parameter) to
        'acquired_field1'."""
        pipeline = self.new_pipeline(
            'pipeline1',
            desc="",
            citations=[],
            name_maps=name_maps)
        math = pipeline.add(
            'math',
            TestMath(
                op='add',
                as_file=False),
            inputs={
                'x': ('acquired_field1', int)},
            outputs={
                'derived_field1': ('z', int)})
        # Fall back to the 'increment' parameter when the optional second
        # acquired field was not provided.
        if self.provided('acquired_field2'):
            pipeline.connect_input('acquired_field2', math, 'y')
        else:
            math.inputs.y = self.parameter('increment')
        return pipeline

    def pipeline2(self, **name_maps):
        """Sum 'derived_field1' across visits (per-subject join)."""
        pipeline = self.new_pipeline(
            'pipeline2',
            desc="",
            citations=[],
            name_maps=name_maps)
        pipeline.add(
            'math',
            TestMath(
                op='add',
                as_file=False),
            inputs={
                'x': ('derived_field1', int)},
            outputs={
                'derived_field2': ('z', int)},
            joinsource=self.VISIT_ID,
            joinfield='x')
        return pipeline

    def pipeline3(self, **name_maps):
        """Combine 'derived_field1' across subjects (per-visit join) using
        the configurable 'pipeline3_op' operation."""
        pipeline = self.new_pipeline(
            'pipeline3',
            desc="",
            citations=[],
            name_maps=name_maps)
        pipeline.add(
            'math',
            TestMath(
                op=self.parameter('pipeline3_op'),
                as_file=False),
            inputs={
                'x': ('derived_field1', int)},
            outputs={
                'derived_field3': ('z', int)},
            joinsource=self.SUBJECT_ID,
            joinfield='x')
        return pipeline

    def pipeline4(self, **name_maps):
        """Sum 'derived_field1' over all subjects and visits
        (per-dataset join via two nested merges)."""
        pipeline = self.new_pipeline(
            'pipeline4',
            desc="",
            citations=[],
            name_maps=name_maps)
        merge1 = pipeline.add(
            'merge1',
            Merge(
                numinputs=1,
                ravel_inputs=True),
            inputs={
                'in1': ('derived_field1', int)},
            joinsource=self.SUBJECT_ID,
            joinfield='in1')
        merge2 = pipeline.add(
            'merge2',
            Merge(
                numinputs=1,
                ravel_inputs=True),
            inputs={
                'in1': (merge1, 'out')},
            joinsource=self.VISIT_ID,
            joinfield='in1')
        pipeline.add(
            'math',
            TestMath(
                op='add',
                as_file=False),
            inputs={
                'x': (merge2, 'out')},
            outputs={
                'derived_field4': ('z', int)})
        return pipeline

    def pipeline5(self, **name_maps):
        """Sum derived_field2, 3 and 4 into 'derived_field5'."""
        pipeline = self.new_pipeline(
            'pipeline5',
            desc="",
            citations=[],
            name_maps=name_maps)
        merge = pipeline.add(
            'merge',
            Merge(
                numinputs=3),
            inputs={
                'in1': ('derived_field2', int),
                'in2': ('derived_field3', int),
                'in3': ('derived_field4', int)})
        pipeline.add(
            'math',
            TestMath(
                op='add',
                as_file=False),
            inputs={
                'x': (merge, 'out')},
            outputs={
                # int (not float) to match FieldSpec('derived_field5', int)
                'derived_field5': ('z', int)})
        return pipeline
Example #16
0
class TestProvAnalysis(Analysis, metaclass=AnalysisMetaClass):
    """Analysis fixture whose pipelines exercise provenance recording.

    The three pipelines combine acquired filesets/fields through
    ``TestMath`` nodes under varying software-requirement lists, switch
    branches and parameters, so tests can check the recorded provenance.
    The exact node names, wiring and requirement lists are part of what
    is being tested -- do not restructure casually.
    """

    # Acquired inputs plus derived specs; each derived spec names the
    # pipeline-constructor method that produces it.
    add_data_specs = [
        InputFilesetSpec('acquired_fileset1', text_format),
        InputFilesetSpec('acquired_fileset2', text_format),
        InputFilesetSpec('acquired_fileset3', text_format),
        InputFieldSpec('acquired_field1', float),
        FilesetSpec('derived_fileset1', text_format, 'pipeline2'),
        FieldSpec('derived_field1', float, 'pipeline1', array=True),
        FieldSpec('derived_field2', float, 'pipeline3'),
        FieldSpec('derived_field3', float, 'pipeline3'),
        FieldSpec('derived_field4', float, 'pipeline2')]

    # Switches/parameters referenced by the pipelines below
    add_param_specs = [
        SwitchSpec('extra_req', False),
        SwitchSpec('branch', 'foo', ('foo', 'bar', 'wee')),
        ParamSpec('multiplier', 10.0),
        ParamSpec('subtract', 3)]

    def pipeline1(self, **name_maps):
        """Produce the 'derived_field1' array from the acquired inputs.

        Chains add -> add -> mul, then merges the three intermediate
        results into a 3-element array output.
        """
        pipeline = self.new_pipeline(
            'pipeline1',
            desc="",
            citations=[],
            name_maps=name_maps)
        # acquired_fileset1 + acquired_fileset2
        math1 = pipeline.add(
            'math1',
            TestMath(
                op='add'),
            inputs={
                'x': ('acquired_fileset1', text_format),
                'y': ('acquired_fileset2', text_format)},
            requirements=[
                a_req.v('1.0.1'),
                b_req.v(2)])
        # (previous sum) + acquired_field1
        math2 = pipeline.add(
            'math2',
            TestMath(
                op='add'),
            inputs={
                'y': ('acquired_field1', float),
                'x': (math1, 'z')},
            requirements=[
                c_req.v(0.1)])
        # Set up different requirements based on switch
        math3_reqs = [a_req.v(1)]
        if self.branch('extra_req'):
            math3_reqs.append(d_req.v('0.8.6'))
        # Scale by the 'multiplier' parameter
        math3 = pipeline.add(
            'math3',
            TestMath(
                op='mul',
                y=self.parameter('multiplier')),
            inputs={
                'x': (math2, 'z')},
            requirements=[
                b_req.v('2.7.0', '3.0')])
        # NOTE(review): math3_reqs (built conditionally above) is attached
        # to this merge node rather than to math3 itself -- presumably
        # deliberate for the provenance test, but worth confirming.
        pipeline.add(
            'merge1',
            Merge(3),
            inputs={
                'in1': (math1, 'z'),
                'in2': (math2, 'z'),
                'in3': (math3, 'z')},
            outputs={
                'derived_field1': ('out', float)},
            requirements=math3_reqs)
        return pipeline

    def pipeline2(self, **name_maps):
        """Derive 'derived_fileset1' and 'derived_field4' from pipeline1's array."""
        pipeline = self.new_pipeline(
            'pipeline2',
            desc="",
            citations=[],
            name_maps=name_maps)
        # Unpack the 3-element 'derived_field1' array into scalars
        split = pipeline.add(
            'split',
            Split(
                splits=[1, 1, 1],
                squeeze=True),
            inputs={
                'inlist': ('derived_field1', float)})
        # third array element + acquired_fileset3 (file-based result)
        math1 = pipeline.add(
            'math1',
            TestMath(
                op='add',
                as_file=True),
            inputs={
                'y': ('acquired_fileset3', text_format),
                'x': (split, 'out3')},
            requirements=[
                a_req.v('1.0')])
        math2 = pipeline.add(
            'math2',
            TestMath(
                op='add',
                as_file=True),
            inputs={
                'y': ('acquired_field1', float),
                'x': (math1, 'z')},
            outputs={
                'derived_fileset1': ('z', text_format)},
            requirements=[
                c_req.v(0.1)])
        # Subtracting -1 (i.e. adding 1) yields the scalar field output
        pipeline.add(
            'math3',
            TestMath(
                op='sub',
                as_file=False,
                y=-1),
            inputs={
                'x': (math2, 'z')},
            outputs={
                'derived_field4': ('z', float)})
        return pipeline

    def pipeline3(self, **name_maps):
        """Derive 'derived_field2' and 'derived_field3' independently."""
        pipeline = self.new_pipeline(
            'pipeline3',
            desc="",
            citations=[],
            name_maps=name_maps)
        # acquired_fileset2 + derived_fileset1 -> derived_field2
        pipeline.add(
            'math1',
            TestMath(
                op='add',
                as_file=False),
            inputs={
                'x': ('acquired_fileset2', text_format),
                'y': ('derived_fileset1', text_format)},
            outputs={
                'derived_field2': ('z', float)},
            requirements=[
                a_req.v('1.0')])
        # acquired_field1 - 'subtract' parameter -> derived_field3
        pipeline.add(
            'math2',
            TestMath(
                op='sub',
                as_file=False,
                y=self.parameter('subtract')),
            inputs={
                'x': ('acquired_field1', float)},
            outputs={
                'derived_field3': ('z', float)},
            requirements=[
                c_req.v(0.1)])
        return pipeline
Example #17
0
File: epi.py Project: amrka/banana
class EpiStudy(MriStudy, metaclass=StudyMetaClass):
    """MRI study subclass for echo-planar imaging (EPI) acquisitions.

    Adds EPI-specific processing on top of ``MriStudy``: intra-scan
    motion correction (FSL MCFLIRT), EPI-to-structural registration with
    FSL ``epi_reg``, and susceptibility-distortion correction using
    either a fieldmap (FUGUE) or a reverse-phase-encoded acquisition
    (TOPUP), selected by which optional inputs are supplied.
    """

    add_data_specs = [
        AcquiredFilesetSpec('coreg_ref_preproc',
                            STD_IMAGE_FORMATS,
                            optional=True),
        AcquiredFilesetSpec('coreg_ref_wmseg',
                            STD_IMAGE_FORMATS,
                            optional=True),
        AcquiredFilesetSpec('reverse_phase', STD_IMAGE_FORMATS, optional=True),
        AcquiredFilesetSpec('field_map_mag', STD_IMAGE_FORMATS, optional=True),
        AcquiredFilesetSpec('field_map_phase',
                            STD_IMAGE_FORMATS,
                            optional=True),
        FilesetSpec('moco', nifti_gz_format, 'intrascan_alignment_pipeline'),
        FilesetSpec('align_mats', directory_format,
                    'intrascan_alignment_pipeline'),
        FilesetSpec('moco_par', par_format, 'intrascan_alignment_pipeline'),
        FieldSpec('field_map_delta_te', float, 'field_map_time_info_pipeline')
    ]

    add_param_specs = [
        ParameterSpec('bet_robust', True),
        ParameterSpec('bet_f_threshold', 0.2),
        ParameterSpec('bet_reduce_bias', False),
        # Dwell time passed to FUGUE (seconds)
        ParameterSpec('fugue_echo_spacing', 0.000275),
        ParameterSpec('linear_coreg_method', 'epireg')
    ]

    def linear_brain_coreg_pipeline(self, **kwargs):
        """Select the EPI-to-structural registration implementation.

        Uses FSL ``epi_reg`` when 'linear_coreg_method' is 'epireg',
        otherwise defers to the superclass implementation.
        """
        if self.branch('linear_coreg_method', 'epireg'):
            return self._epireg_linear_brain_coreg_pipeline(**kwargs)
        else:
            return super(EpiStudy, self).linear_brain_coreg_pipeline(**kwargs)

    def _epireg_linear_brain_coreg_pipeline(self, **kwargs):
        """Register the EPI brain to the structural image with epi_reg."""
        pipeline = self.new_pipeline(
            name='linear_coreg',
            desc=("Intra-subjects epi registration improved using white "
                  "matter boundaries."),
            # Fixed: was ``references=``; every other new_pipeline call in
            # this class passes ``citations=``.
            citations=[fsl_cite],
            **kwargs)
        pipeline.add('epireg',
                     fsl.epi.EpiReg(out_base='epireg2ref'),
                     inputs={
                         'epi': ('brain', nifti_gz_format),
                         't1_brain': ('coreg_ref_brain', nifti_gz_format),
                         't1_head': ('coreg_ref_preproc', nifti_gz_format),
                         'wmseg': ('wmseg', nifti_gz_format)
                     },
                     outputs={
                         'out_file': ('coreg_brain', nifti_gz_format),
                         'epi2str_mat': ('coreg_matrix', text_matrix_format)
                     },
                     requirements=[fsl_req.v('5.0.9')])

        return pipeline

    def intrascan_alignment_pipeline(self, **kwargs):
        """Align the volumes of an EPI series to its first volume.

        Produces 'moco' (the realigned series), 'moco_par' (motion
        parameters) and 'align_mats' (directory of per-volume transforms).
        """
        pipeline = self.new_pipeline(name='MCFLIRT_pipeline',
                                     desc=("Intra-epi volumes alignment."),
                                     citations=[fsl_cite],
                                     **kwargs)
        mcflirt = pipeline.add('mcflirt',
                               fsl.MCFLIRT(),
                               requirements=[fsl_req.v('5.0.9')])
        mcflirt.inputs.ref_vol = 0  # register to the first volume
        mcflirt.inputs.save_mats = True
        mcflirt.inputs.save_plots = True
        mcflirt.inputs.output_type = 'NIFTI_GZ'
        mcflirt.inputs.out_file = 'moco.nii.gz'
        pipeline.connect_input('preproc', mcflirt, 'in_file')
        pipeline.connect_output('moco', mcflirt, 'out_file')
        pipeline.connect_output('moco_par', mcflirt, 'par_file')

        # Bundle the per-volume .mat files into a single directory output
        merge = pipeline.add('merge', MergeListMotionMat())
        pipeline.connect(mcflirt, 'mat_file', merge, 'file_list')
        pipeline.connect_output('align_mats', merge, 'out_dir')

        return pipeline

    def field_map_time_info_pipeline(self, **kwargs):
        """Extract the delta-TE value from the fieldmap magnitude image.

        Populates the 'field_map_delta_te' field consumed by the FUGUE
        preprocessing pipeline.
        """
        # Fixed: converted from the legacy ``create_pipeline`` API (with
        # inline inputs=/outputs=/version= arguments and ``DatasetSpec``)
        # to ``new_pipeline``, matching the rest of this class; the
        # pipeline's inputs/outputs are declared in ``add_data_specs``.
        pipeline = self.new_pipeline(
            name='field_map_time_info_pipeline',
            desc=("Pipeline to extract delta TE from field map "
                  "images, if provided"),
            citations=[fsl_cite],
            **kwargs)

        delta_te = pipeline.create_node(FieldMapTimeInfo(),
                                        name='extract_delta_te')
        pipeline.connect_input('field_map_mag', delta_te, 'fm_mag')
        pipeline.connect_output('field_map_delta_te', delta_te, 'delta_te')

        return pipeline

    def preprocess_pipeline(self, **kwargs):
        """Select the distortion-correction strategy from available inputs.

        Fieldmap inputs take priority (FUGUE); a reverse-phase acquisition
        falls back to TOPUP; otherwise the superclass preprocessing runs.
        """
        if ('field_map_phase' in self.input_names
                and 'field_map_mag' in self.input_names):
            return self._fugue_pipeline(**kwargs)
        elif 'reverse_phase' in self.input_names:
            return self._topup_pipeline(**kwargs)
        else:
            return super(EpiStudy, self).preprocess_pipeline(**kwargs)

    def _topup_pipeline(self, **kwargs):
        """TOPUP distortion correction using a reverse-phase acquisition."""
        pipeline = self.new_pipeline(
            name='preprocess_pipeline',
            desc=("Topup distortion correction pipeline"),
            citations=[fsl_cite],
            **kwargs)

        # Reorient both phase-encode directions to standard orientation
        reorient_epi_in = pipeline.create_node(
            fsl.utils.Reorient2Std(),
            name='reorient_epi_in',
            requirements=[fsl_req.v('5.0.9')])
        pipeline.connect_input('magnitude', reorient_epi_in, 'in_file')

        reorient_epi_opposite = pipeline.create_node(
            fsl.utils.Reorient2Std(),
            name='reorient_epi_opposite',
            requirements=[fsl_req.v('5.0.9')])
        pipeline.connect_input('reverse_phase', reorient_epi_opposite,
                               'in_file')
        # Derive the phase-encoding setup for the TOPUP config
        prep_dwi = pipeline.create_node(PrepareDWI(), name='prepare_dwi')
        prep_dwi.inputs.topup = True
        pipeline.connect_input('ped', prep_dwi, 'pe_dir')
        pipeline.connect_input('pe_angle', prep_dwi, 'ped_polarity')
        pipeline.connect(reorient_epi_in, 'out_file', prep_dwi, 'dwi')
        pipeline.connect(reorient_epi_opposite, 'out_file', prep_dwi, 'dwi1')
        ped = pipeline.create_node(GenTopupConfigFiles(), name='gen_config')
        pipeline.connect(prep_dwi, 'pe', ped, 'ped')
        # Concatenate the two acquisitions along time for TOPUP estimation
        merge_outputs = pipeline.create_node(merge_lists(2),
                                             name='merge_files')
        pipeline.connect(prep_dwi, 'main', merge_outputs, 'in1')
        pipeline.connect(prep_dwi, 'secondary', merge_outputs, 'in2')
        merge = pipeline.create_node(fsl_merge(),
                                     name='fsl_merge',
                                     requirements=[fsl_req.v('5.0.9')])
        merge.inputs.dimension = 't'
        pipeline.connect(merge_outputs, 'out', merge, 'in_files')
        # Estimate the susceptibility field
        topup = pipeline.create_node(TOPUP(),
                                     name='topup',
                                     requirements=[fsl_req.v('5.0.9')])
        pipeline.connect(merge, 'merged_file', topup, 'in_file')
        pipeline.connect(ped, 'config_file', topup, 'encoding_file')
        # Apply the estimated correction to the main acquisition only
        in_apply_tp = pipeline.create_node(merge_lists(1), name='in_apply_tp')
        pipeline.connect(reorient_epi_in, 'out_file', in_apply_tp, 'in1')
        apply_topup = pipeline.create_node(ApplyTOPUP(),
                                           name='applytopup',
                                           requirements=[fsl_req.v('5.0.9')])
        apply_topup.inputs.method = 'jac'
        apply_topup.inputs.in_index = [1]
        pipeline.connect(in_apply_tp, 'out', apply_topup, 'in_files')
        pipeline.connect(ped, 'apply_topup_config', apply_topup,
                         'encoding_file')
        pipeline.connect(topup, 'out_movpar', apply_topup, 'in_topup_movpar')
        pipeline.connect(topup, 'out_fieldcoef', apply_topup,
                         'in_topup_fieldcoef')

        pipeline.connect_output('preproc', apply_topup, 'out_corrected')
        return pipeline

    def _fugue_pipeline(self, **kwargs):
        """FUGUE distortion correction using fieldmap magnitude/phase."""
        pipeline = self.new_pipeline(
            name='preprocess_pipeline',
            desc=("Fugue distortion correction pipeline"),
            # Fixed: was ``references=``; ``citations=`` matches the rest
            # of this class.
            citations=[fsl_cite],
            **kwargs)

        # Reorient the EPI and both fieldmap images to standard orientation
        reorient_epi_in = pipeline.create_node(
            fsl.utils.Reorient2Std(),
            name='reorient_epi_in',
            requirements=[fsl_req.v('5.0.9')])
        pipeline.connect_input('magnitude', reorient_epi_in, 'in_file')
        fm_mag_reorient = pipeline.create_node(
            fsl.utils.Reorient2Std(),
            name='reorient_fm_mag',
            requirements=[fsl_req.v('5.0.9')])
        pipeline.connect_input('field_map_mag', fm_mag_reorient, 'in_file')
        fm_phase_reorient = pipeline.create_node(
            fsl.utils.Reorient2Std(),
            name='reorient_fm_phase',
            requirements=[fsl_req.v('5.0.9')])
        pipeline.connect_input('field_map_phase', fm_phase_reorient, 'in_file')
        # Skull-strip the fieldmap magnitude before fieldmap preparation
        bet = pipeline.create_node(BET(),
                                   name="bet",
                                   wall_time=5,
                                   requirements=[fsl_req.v('5.0.9')])
        bet.inputs.robust = True
        pipeline.connect(fm_mag_reorient, 'out_file', bet, 'in_file')
        create_fmap = pipeline.create_node(PrepareFieldmap(),
                                           name="prepfmap",
                                           wall_time=5,
                                           requirements=[fsl_req.v('5.0.9')])
        # delta_TE comes from the data via field_map_time_info_pipeline
        # (it was previously hard-coded to 2.46)
        pipeline.connect_input('field_map_delta_te', create_fmap, 'delta_TE')
        pipeline.connect(bet, "out_file", create_fmap, "in_magnitude")
        pipeline.connect(fm_phase_reorient, 'out_file', create_fmap,
                         'in_phase')

        # Unwarp the EPI using the prepared fieldmap
        fugue = pipeline.create_node(FUGUE(),
                                     name='fugue',
                                     wall_time=5,
                                     requirements=[fsl_req.v('5.0.9')])
        fugue.inputs.unwarp_direction = 'x'
        fugue.inputs.dwell_time = self.parameter('fugue_echo_spacing')
        fugue.inputs.unwarped_file = 'example_func.nii.gz'
        pipeline.connect(create_fmap, 'out_fieldmap', fugue, 'fmap_in_file')
        pipeline.connect(reorient_epi_in, 'out_file', fugue, 'in_file')
        pipeline.connect_output('preproc', fugue, 'unwarped_file')
        return pipeline

    def motion_mat_pipeline(self, **kwargs):
        """Combine registration and alignment matrices into motion matrices.

        The intra-scan alignment matrices are only included when no
        reverse-phase acquisition was provided (TOPUP handles motion in
        that branch).
        """
        pipeline = self.new_pipeline(name='motion_mat_calculation',
                                     desc=("Motion matrices calculation"),
                                     citations=[fsl_cite],
                                     **kwargs)

        mm = pipeline.create_node(MotionMatCalculation(), name='motion_mats')
        pipeline.connect_input('coreg_matrix', mm, 'reg_mat')
        pipeline.connect_input('qform_mat', mm, 'qform_mat')
        if 'reverse_phase' not in self.input_names:
            pipeline.connect_input('align_mats', mm, 'align_mats')
        pipeline.connect_output('motion_mats', mm, 'motion_mats')
        return pipeline
Example #18
0
class ExampleAnalysis(with_metaclass(AnalysisMetaClass, Analysis)):
    """Toy analysis used to exercise pipeline construction and iteration.

    Its pipelines form a small dependency chain (pipeline1 -> ... ->
    pipeline4), demonstrate constructor arguments (pipeline5), iterator
    access, and per-subject/per-visit/per-dataset summary joins.
    """

    add_data_specs = [
        InputFilesetSpec('one', text_format),
        InputFilesetSpec('ten', text_format),
        FilesetSpec('derived1_1', text_format, 'pipeline1'),
        FilesetSpec('derived1_2', text_format, 'pipeline1'),
        FilesetSpec('derived2', text_format, 'pipeline2'),
        FilesetSpec('derived3', text_format, 'pipeline3'),
        FilesetSpec('derived4', text_format, 'pipeline4'),
        FieldSpec('derived5a', str, 'pipeline5', pipeline_args={'arg': 'a'}),
        FieldSpec('derived5b', str, 'pipeline5', pipeline_args={'arg': 'b'}),
        FilesetSpec('subject_summary',
                    text_format,
                    'subject_summary_pipeline',
                    frequency='per_subject'),
        FilesetSpec('visit_summary',
                    text_format,
                    'visit_summary_pipeline',
                    frequency='per_visit'),
        FilesetSpec('analysis_summary',
                    text_format,
                    'analysis_summary_pipeline',
                    frequency='per_dataset'),
        FilesetSpec('subject_ids',
                    text_format,
                    'subject_ids_access_pipeline',
                    frequency='per_visit'),
        FilesetSpec('visit_ids',
                    text_format,
                    'visit_ids_access_pipeline',
                    frequency='per_subject')
    ]

    add_param_specs = [ParamSpec('pipeline_parameter', False)]

    def pipeline1(self, **name_maps):
        """Fan 'one' out through two identity nodes into derived1_1/derived1_2."""
        pipeline = self.new_pipeline(
            name='pipeline1',
            desc="A dummy pipeline used to test 'run_pipeline' method",
            citations=[],
            name_maps=name_maps)
        # Parameters must be accessible inside pipeline constructors
        if not self.parameter('pipeline_parameter'):
            raise Exception("Pipeline parameter was not accessible")
        passthru_a = pipeline.add("ident1", IdentityInterface(fields=['file']))
        passthru_b = pipeline.add("ident2", IdentityInterface(fields=['file']))
        # Both identity nodes read the same acquired input...
        pipeline.connect_input('one', passthru_a, 'file')
        pipeline.connect_input('one', passthru_b, 'file')
        # ...and feed their own derived spec
        pipeline.connect_output('derived1_1', passthru_a, 'file')
        pipeline.connect_output('derived1_2', passthru_b, 'file')
        return pipeline

    def pipeline2(self, **name_maps):
        """Add 'one' and 'derived1_1' together to produce 'derived2'."""
        pipeline = self.new_pipeline(
            name='pipeline2',
            desc="A dummy pipeline used to test 'run_pipeline' method",
            citations=[],
            name_maps=name_maps)
        # Check the parameter is propagated into dependent pipelines
        if not self.parameter('pipeline_parameter'):
            raise Exception("Pipeline parameter was not cascaded down to "
                            "pipeline2")
        adder = pipeline.add("math", TestMath())
        adder.inputs.op = 'add'
        adder.inputs.as_file = True
        pipeline.connect_input('one', adder, 'x')
        pipeline.connect_input('derived1_1', adder, 'y')
        pipeline.connect_output('derived2', adder, 'z')
        return pipeline

    def pipeline3(self, **name_maps):
        """Pass 'derived2' through unchanged as 'derived3'."""
        pipeline = self.new_pipeline(
            name='pipeline3',
            desc="A dummy pipeline used to test 'run_pipeline' method",
            citations=[],
            name_maps=name_maps)
        passthru = pipeline.add('ident', IdentityInterface(fields=['file']))
        pipeline.connect_input('derived2', passthru, 'file')
        pipeline.connect_output('derived3', passthru, 'file')
        return pipeline

    def pipeline4(self, **name_maps):
        """Multiply 'derived1_2' by 'derived3' to produce 'derived4'."""
        pipeline = self.new_pipeline(
            name='pipeline4',
            desc="A dummy pipeline used to test 'run_pipeline' method",
            citations=[],
            name_maps=name_maps)
        product = pipeline.add("mrcat", TestMath())
        product.inputs.op = 'mul'
        product.inputs.as_file = True
        pipeline.connect_input('derived1_2', product, 'x')
        pipeline.connect_input('derived3', product, 'y')
        pipeline.connect_output('derived4', product, 'z')
        return pipeline

    def pipeline5(self, arg, **name_maps):
        """Parameterised constructor for derived5a/derived5b.

        ``arg`` ('a' or 'b', supplied via ``pipeline_args`` in the data
        specs) selects which output spec the identity node's constant
        value is written to.
        """
        pipeline = self.new_pipeline(
            name='pipeline5{}'.format(arg),
            desc="A dummy pipeline used to test constructor arguments",
            citations=[],
            name_maps=name_maps)
        pipeline.add("ident",
                     IdentityInterface(fields=['value', 'dummy'], value=arg),
                     inputs={'dummy': ('one', text_format)},
                     outputs={'derived5{}'.format(arg): ('value', str)})
        return pipeline

    def visit_ids_access_pipeline(self, **name_maps):
        """Write the IDs of all visits for a subject to 'visit_ids'."""
        pipeline = self.new_pipeline(
            name='visit_ids_access',
            desc=("A dummy pipeline used to test access to 'session' IDs"),
            citations=[],
            name_maps=name_maps)
        # Join over the visit iterator so every visit ID reaches one node
        collector = pipeline.add('visits_to_file',
                                 IteratorToFile(),
                                 joinfield='ids',
                                 joinsource=self.VISIT_ID)
        pipeline.connect_input(self.VISIT_ID, collector, 'ids')
        pipeline.connect_input(self.SUBJECT_ID, collector, 'fixed_id')
        pipeline.connect_output('visit_ids', collector, 'out_file')
        return pipeline

    def subject_ids_access_pipeline(self, **name_maps):
        """Write the IDs of all subjects for a visit to 'subject_ids'."""
        pipeline = self.new_pipeline(
            name='subject_ids_access',
            desc=("A dummy pipeline used to test access to 'subject' IDs"),
            citations=[],
            name_maps=name_maps)
        # Join over the subject iterator so every subject ID reaches one node
        collector = pipeline.add('subjects_to_file',
                                 IteratorToFile(),
                                 joinsource=self.SUBJECT_ID,
                                 joinfield='ids')
        pipeline.connect_input(self.SUBJECT_ID, collector, 'ids')
        pipeline.connect_input(self.VISIT_ID, collector, 'fixed_id')
        pipeline.connect_output('subject_ids', collector, 'out_file')
        return pipeline

    def subject_summary_pipeline(self, **name_maps):
        """Sum 'one' over all of a subject's visits into 'subject_summary'."""
        pipeline = self.new_pipeline(
            name="subject_summary",
            desc=("Test of project summary variables"),
            citations=[],
            name_maps=name_maps)
        accumulator = pipeline.add('math',
                                   TestMath(),
                                   joinsource=self.VISIT_ID,
                                   joinfield='x')
        accumulator.inputs.op = 'add'
        accumulator.inputs.as_file = True
        pipeline.connect_input('one', accumulator, 'x')
        pipeline.connect_output('subject_summary', accumulator, 'z')
        return pipeline

    def visit_summary_pipeline(self, **name_maps):
        """Sum 'one' over all subjects of a visit into 'visit_summary'."""
        pipeline = self.new_pipeline(
            name="visit_summary",
            desc=("Test of project summary variables"),
            citations=[],
            name_maps=name_maps)
        accumulator = pipeline.add('math',
                                   TestMath(),
                                   joinsource=self.SUBJECT_ID,
                                   joinfield='x')
        accumulator.inputs.op = 'add'
        accumulator.inputs.as_file = True
        pipeline.connect_input('one', accumulator, 'x')
        pipeline.connect_output('visit_summary', accumulator, 'z')
        return pipeline

    def analysis_summary_pipeline(self, **name_maps):
        """Sum 'one' over every visit, then every subject ('analysis_summary')."""
        pipeline = self.new_pipeline(
            name="analysis_summary",
            desc=("Test of project summary variables"),
            citations=[],
            name_maps=name_maps)
        # Inner join over visits...
        inner = pipeline.add('math1',
                             TestMath(),
                             joinsource=self.VISIT_ID,
                             joinfield='x')
        inner.inputs.op = 'add'
        inner.inputs.as_file = True
        # ...then an outer join over subjects
        outer = pipeline.add('math2',
                             TestMath(),
                             joinsource=self.SUBJECT_ID,
                             joinfield='x')
        outer.inputs.op = 'add'
        outer.inputs.as_file = True
        pipeline.connect_input('one', inner, 'x')
        pipeline.connect(inner, 'z', outer, 'x')
        pipeline.connect_output('analysis_summary', outer, 'z')
        return pipeline