Example #1
class StaticPetStudy(PetStudy, metaclass=StudyMetaClass):

    add_data_specs = [
        InputFilesetSpec('pet_image', nifti_gz_format),
        InputFilesetSpec('base_mask', nifti_gz_format),
        FilesetSpec('SUVR_image', nifti_gz_format, 'suvr_pipeline')]

    primary_scan_name = 'pet_image'

    def suvr_pipeline(self, **kwargs):

        pipeline = self.new_pipeline(
            name='SUVR',
            desc=('Calculate SUVR image'),
            citations=[],
            **kwargs)

        pipeline.add(
            'SUVR',
            SUVRCalculation(),
            inputs={
                'volume': ('registered_volume', nifti_gz_format),
                'base_mask': ('base_mask', nifti_gz_format)},
            outputs={
                'SUVR_image': ('SUVR_file', nifti_gz_format)})

        return pipeline

    def _ica_inputs(self):
        pass
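
SUVRCalculation here is an external interface, but the quantity it computes is the standard one: each voxel of the PET image divided by the mean uptake within the reference ('base') mask. A minimal NumPy sketch of that arithmetic, using a hypothetical helper rather than the actual interface:

import numpy as np

def suvr(pet, base_mask):
    # Voxel-wise SUVR: PET uptake divided by the mean uptake
    # inside the reference mask
    ref_mean = pet[base_mask > 0].mean()
    return pet / ref_mean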
Example #2
class DummyStudy(with_metaclass(StudyMetaClass, Study)):

    add_data_specs = [
        InputFilesetSpec('source1', text_format),
        InputFilesetSpec('source2', text_format),
        InputFilesetSpec('source3', text_format),
        InputFilesetSpec('source4', text_format, optional=True),
        FilesetSpec('sink1', text_format, 'dummy_pipeline'),
        FilesetSpec('sink3', text_format, 'dummy_pipeline'),
        FilesetSpec('sink4', text_format, 'dummy_pipeline'),
        FilesetSpec('subject_sink',
                    text_format,
                    'dummy_pipeline',
                    frequency='per_subject'),
        FilesetSpec('visit_sink',
                    text_format,
                    'dummy_pipeline',
                    frequency='per_visit'),
        FilesetSpec('project_sink',
                    text_format,
                    'dummy_pipeline',
                    frequency='per_study'),
        FilesetSpec('resink1', text_format, 'dummy_pipeline'),
        FilesetSpec('resink2', text_format, 'dummy_pipeline'),
        FilesetSpec('resink3', text_format, 'dummy_pipeline')
    ]

    def dummy_pipeline(self):
        pass
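
with_metaclass comes from the six compatibility library and is the Python 2/3-portable way of applying StudyMetaClass. Under Python 3 alone, the same class can be declared with the metaclass keyword, as several later examples on this page do:

# Python 3-only equivalent of the with_metaclass form above
class DummyStudy(Study, metaclass=StudyMetaClass):
    ...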
Example #3
class DummyAnalysis(Analysis, metaclass=AnalysisMetaClass):

    add_data_specs = [
        InputFilesetSpec('source1', text_format),
        InputFilesetSpec('source2', text_format, optional=True),
        InputFilesetSpec('source3', text_format, optional=True),
        InputFilesetSpec('source4', text_format, optional=True),
        FilesetSpec('sink1', text_format, 'dummy_pipeline'),
        FilesetSpec('sink3', text_format, 'dummy_pipeline'),
        FilesetSpec('sink4', text_format, 'dummy_pipeline'),
        FilesetSpec('subject_sink',
                    text_format,
                    'dummy_pipeline',
                    frequency='per_subject'),
        FilesetSpec('visit_sink',
                    text_format,
                    'dummy_pipeline',
                    frequency='per_visit'),
        FilesetSpec('analysis_sink',
                    text_format,
                    'dummy_pipeline',
                    frequency='per_dataset'),
        FilesetSpec('resink1', text_format, 'dummy_pipeline'),
        FilesetSpec('resink2', text_format, 'dummy_pipeline'),
        FilesetSpec('resink3', text_format, 'dummy_pipeline'),
        FieldSpec('field1', int, 'dummy_pipeline'),
        FieldSpec('field2', float, 'dummy_pipeline'),
        FieldSpec('field3', str, 'dummy_pipeline')
    ]

    def dummy_pipeline(self, **name_maps):
        return self.new_pipeline('dummy_pipeline', name_maps=name_maps)
Example #4
class DummyStudy(with_metaclass(StudyMetaClass, Study)):

    add_data_specs = [
        InputFilesetSpec('source1', text_format, optional=True),
        InputFilesetSpec('source2', text_format, optional=True),
        InputFilesetSpec('source3', text_format, optional=True),
        InputFilesetSpec('source4', text_format, optional=True),
        FilesetSpec('sink1', text_format, 'dummy_pipeline'),
        FilesetSpec('sink3', text_format, 'dummy_pipeline'),
        FilesetSpec('sink4', text_format, 'dummy_pipeline'),
        FilesetSpec('subject_sink',
                    text_format,
                    'dummy_pipeline',
                    frequency='per_subject'),
        FilesetSpec('visit_sink',
                    text_format,
                    'dummy_pipeline',
                    frequency='per_visit'),
        FilesetSpec('study_sink',
                    text_format,
                    'dummy_pipeline',
                    frequency='per_study'),
        FilesetSpec('resink1', text_format, 'dummy_pipeline'),
        FilesetSpec('resink2', text_format, 'dummy_pipeline'),
        FilesetSpec('resink3', text_format, 'dummy_pipeline'),
        FieldSpec('field1', int, 'dummy_pipeline'),
        FieldSpec('field2', float, 'dummy_pipeline'),
        FieldSpec('field3', str, 'dummy_pipeline')
    ]

    def dummy_pipeline(self, **name_maps):
        return self.new_pipeline('dummy', name_maps=name_maps)
Example #5
class StudyA(with_metaclass(StudyMetaClass, Study)):

    add_data_specs = [
        InputFilesetSpec('x', text_format),
        InputFilesetSpec('y', text_format),
        FilesetSpec('z', text_format, 'pipeline_alpha')
    ]

    add_param_specs = [
        ParamSpec('o1', 1),
        ParamSpec('o2', '2'),
        ParamSpec('o3', 3.0)
    ]

    def pipeline_alpha(self, **name_maps):
        pipeline = self.new_pipeline(
            name='pipeline_alpha',
            desc="A dummy pipeline used to test MultiStudy class",
            citations=[],
            name_maps=name_maps)
        math = pipeline.add("math", TestMath())
        math.inputs.op = 'add'
        math.inputs.as_file = True
        # Connect inputs
        pipeline.connect_input('x', math, 'x')
        pipeline.connect_input('y', math, 'y')
        # Connect outputs
        pipeline.connect_output('z', math, 'z')
        return pipeline
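
pipeline_alpha wires its node with explicit connect_input/connect_output calls; other examples on this page pass inputs/outputs dictionaries to pipeline.add instead. A sketch of the same wiring in that style, assuming TestMath follows the usual nipype convention of accepting input traits as constructor keywords:

# Equivalent, dict-style wiring of the 'math' node above
pipeline.add(
    'math',
    TestMath(op='add', as_file=True),
    inputs={'x': ('x', text_format),
            'y': ('y', text_format)},
    outputs={'z': ('z', text_format)})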
Example #6
class TestAnalysis(with_metaclass(AnalysisMetaClass, Analysis)):

    add_data_specs = [
        InputFilesetSpec('fileset1', text_format),
        InputFilesetSpec('fileset2', text_format, optional=True),
        InputFilesetSpec('fileset3', text_format),
        InputFilesetSpec('fileset5', text_format, optional=True)
    ]
Example #7
class TestStudy(with_metaclass(StudyMetaClass, Study)):

    add_data_specs = [
        InputFilesetSpec('fileset1', text_format),
        InputFilesetSpec('fileset2', text_format, optional=True),
        InputFilesetSpec('fileset3', text_format),
        InputFilesetSpec('fileset5', text_format, optional=True)
    ]
Example #8
class ConversionStudy(with_metaclass(StudyMetaClass, Study)):

    add_data_specs = [
        InputFilesetSpec('text', text_format),
        InputFilesetSpec('directory', directory_format),
        InputFilesetSpec('zip', zip_format),
        FilesetSpec('text_from_text', text_format, 'conv_pipeline'),
        FilesetSpec('directory_from_zip_on_input', directory_format,
                    'conv_pipeline'),
        FilesetSpec('zip_from_directory_on_input', zip_format,
                    'conv_pipeline'),
        FilesetSpec('directory_from_zip_on_output', directory_format,
                    'conv_pipeline'),
        FilesetSpec('zip_from_directory_on_output', zip_format,
                    'conv_pipeline')
    ]

    def conv_pipeline(self, **name_maps):
        pipeline = self.new_pipeline(
            name='conv_pipeline',
            name_maps=name_maps,
            desc=("A pipeline that tests out various data format "
                  "conversions"))
        # No conversion from text to text format
        pipeline.add('text_from_text',
                     IdentityInterface(fields=['file']),
                     inputs={'file': ('text', text_format)},
                     outputs={'text_from_text': ('file', text_format)})
        # Convert from zip file to directory format on input
        pipeline.add('directory_from_zip_on_input',
                     IdentityInterface(fields=['file']),
                     inputs={'file': ('zip', directory_format)},
                     outputs={
                         'directory_from_zip_on_input':
                         ('file', directory_format)
                     })
        # Convert from zip file to directory format on output
        pipeline.add(
            'directory_from_zip_on_output',
            IdentityInterface(fields=['file']),
            inputs={'file': ('zip', zip_format)},
            outputs={'directory_from_zip_on_output': ('file', zip_format)})
        # Convert from directory to zip format on input
        pipeline.add(
            'zip_from_directory_on_input',
            IdentityInterface(fields=['file']),
            inputs={'file': ('directory', zip_format)},
            outputs={'zip_from_directory_on_input': ('file', zip_format)})
        # Convert from directory to zip format on output
        pipeline.add('zip_from_directory_on_output',
                     IdentityInterface(fields=['file']),
                     inputs={'file': ('directory', directory_format)},
                     outputs={
                         'zip_from_directory_on_output':
                         ('file', directory_format)
                     })
        return pipeline
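
The point of this study is that the framework inserts format converters automatically whenever the format named in an inputs/outputs mapping differs from the spec's stored format. For the zip/directory direction this amounts to ordinary archive (un)packing; a stdlib sketch of the equivalent operations (illustrative only, not arcana's converter classes):

import shutil
import zipfile

def directory_from_zip(zip_path, out_dir):
    # Unpack a zip archive into a directory
    with zipfile.ZipFile(zip_path) as zf:
        zf.extractall(out_dir)

def zip_from_directory(dir_path, out_base):
    # Pack a directory into <out_base>.zip and return the archive path
    return shutil.make_archive(out_base, 'zip', dir_path)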
Example #9
class TestMatchAnalysis(with_metaclass(AnalysisMetaClass, Analysis)):

    add_data_specs = [
        InputFilesetSpec('gre_phase', dicom_format),
        InputFilesetSpec('gre_mag', dicom_format)]

    def dummy_pipeline1(self):
        pass

    def dummy_pipeline2(self):
        pass
Example #10
class ConversionAnalysis(Analysis, metaclass=AnalysisMetaClass):

    add_data_specs = [
        InputFilesetSpec('mrtrix', text_format),
        InputFilesetSpec('nifti_gz', text_format),
        InputFilesetSpec('dicom', dicom_format),
        InputFilesetSpec('directory', directory_format),
        InputFilesetSpec('zip', zip_format),
        FilesetSpec('nifti_gz_from_dicom', text_format, 'conv_pipeline'),
        FilesetSpec('mrtrix_from_nifti_gz', text_format, 'conv_pipeline'),
        FilesetSpec('nifti_from_mrtrix', nifti_format, 'conv_pipeline'),
        FilesetSpec('directory_from_zip', directory_format, 'conv_pipeline'),
        FilesetSpec('zip_from_directory', zip_format, 'conv_pipeline')
    ]

    def conv_pipeline(self):
        pipeline = self.new_pipeline(
            name='conv_pipeline',
            desc=("A pipeline that tests out various data format conversions"),
            citations=[],
        )
        # Convert from DICOM to NIfTI.gz format on input
        nifti_gz_from_dicom = pipeline.add('nifti_gz_from_dicom',
                                           IdentityInterface(fields=['file']))
        pipeline.connect_input('dicom', nifti_gz_from_dicom, 'file')
        pipeline.connect_output('nifti_gz_from_dicom', nifti_gz_from_dicom,
                                'file')
        # Convert from NIfTI.gz to MRtrix format on output
        mrtrix_from_nifti_gz = pipeline.add('mrtrix_from_nifti_gz',
                                            IdentityInterface(fields=['file']))
        pipeline.connect_input('nifti_gz', mrtrix_from_nifti_gz, 'file')
        pipeline.connect_output('mrtrix_from_nifti_gz', mrtrix_from_nifti_gz,
                                'file')
        # Convert from MRtrix to NIfTI format on output
        nifti_from_mrtrix = pipeline.add('nifti_from_mrtrix',
                                         IdentityInterface(fields=['file']))
        pipeline.connect_input('mrtrix', nifti_from_mrtrix, 'file')
        pipeline.connect_output('nifti_from_mrtrix', nifti_from_mrtrix, 'file')
        # Convert from zip file to directory format on input
        directory_from_zip = pipeline.add(
            'directory_from_zip',
            IdentityInterface(fields=['file']),
        )
        pipeline.connect_input('zip', directory_from_zip, 'file')
        pipeline.connect_output('directory_from_zip', directory_from_zip,
                                'file')
        # Convert from directory to zip format on output
        zip_from_directory = pipeline.add('zip_from_directory',
                                          IdentityInterface(fields=['file']))
        pipeline.connect_input('directory', zip_from_directory, 'file')
        pipeline.connect_output('zip_from_directory', zip_from_directory,
                                'file')
        return pipeline
Example #11
class TestMatchStudy(with_metaclass(StudyMetaClass, Study)):

    add_data_specs = [
        InputFilesetSpec('gre_phase', dicom_format),
        InputFilesetSpec('gre_mag', dicom_format)
    ]

    def dummy_pipeline1(self):
        pass

    def dummy_pipeline2(self):
        pass
Example #12
class StudyB(with_metaclass(StudyMetaClass, Study)):

    add_data_specs = [
        InputFilesetSpec('w', text_format),
        InputFilesetSpec('x', text_format),
        FilesetSpec('y', text_format, 'pipeline_beta'),
        FilesetSpec('z', text_format, 'pipeline_beta')
    ]

    add_param_specs = [
        ParamSpec('o1', 10),
        ParamSpec('o2', '20'),
        ParamSpec('o3', 30.0),
        ParamSpec('product_op', None, dtype=str)
    ]  # To be set to 'product'

    def pipeline_beta(self, **name_maps):
        pipeline = self.new_pipeline(
            name='pipeline_beta',
            desc="A dummy pipeline used to test MultiStudy class",
            citations=[],
            name_maps=name_maps)
        add1 = pipeline.add("add1", TestMath())
        add2 = pipeline.add("add2", TestMath())
        prod = pipeline.add("product", TestMath())
        add1.inputs.op = 'add'
        add2.inputs.op = 'add'
        if self.parameter('product_op') is None:
            raise NotSpecifiedRequiredParameter
        prod.inputs.op = self.parameter('product_op')
        add1.inputs.as_file = True
        add2.inputs.as_file = True
        prod.inputs.as_file = True
        # Connect inputs
        pipeline.connect_input('w', add1, 'x')
        pipeline.connect_input('x', add1, 'y')
        pipeline.connect_input('x', add2, 'x')
        # Connect nodes
        pipeline.connect(add1, 'z', add2, 'y')
        pipeline.connect(add1, 'z', prod, 'x')
        pipeline.connect(add2, 'z', prod, 'y')
        # Connect outputs
        pipeline.connect_output('y', add2, 'z')
        pipeline.connect_output('z', prod, 'z')
        return pipeline
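
The dataflow pipeline_beta builds is small enough to write out directly. With product_op set to 'product' (as the trailing comment indicates it should be), the two outputs relate to the inputs as:

def pipeline_beta_dataflow(w, x):
    add1 = w + x      # node 'add1'
    add2 = x + add1   # node 'add2'
    y = add2          # output 'y'
    z = add1 * add2   # output 'z', when product_op == 'product'
    return y, z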
Example #13
class FullMultiStudy(with_metaclass(MultiStudyMetaClass, MultiStudy)):

    add_substudy_specs = [
        SubStudySpec('ss1', StudyA, {
            'x': 'a',
            'y': 'b',
            'z': 'd',
            'o1': 'p1',
            'o2': 'p2',
            'o3': 'p3'
        }),
        SubStudySpec(
            'ss2', StudyB, {
                'w': 'b',
                'x': 'c',
                'y': 'e',
                'z': 'f',
                'o1': 'q1',
                'o2': 'q2',
                'o3': 'p3',
                'product_op': 'required_op'
            })
    ]

    add_data_specs = [
        InputFilesetSpec('a', text_format),
        InputFilesetSpec('b', text_format),
        InputFilesetSpec('c', text_format),
        FilesetSpec('d', text_format, 'pipeline_alpha_trans'),
        FilesetSpec('e', text_format, 'pipeline_beta_trans'),
        FilesetSpec('f', text_format, 'pipeline_beta_trans')
    ]

    add_param_specs = [
        ParamSpec('p1', 100),
        ParamSpec('p2', '200'),
        ParamSpec('p3', 300.0),
        ParamSpec('q1', 150),
        ParamSpec('q2', '250'),
        ParamSpec('required_op', None, dtype=str)
    ]

    pipeline_alpha_trans = MultiStudy.translate('ss1', 'pipeline_alpha')
    pipeline_beta_trans = MultiStudy.translate('ss2', 'pipeline_beta')
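
Each SubStudySpec dictionary maps sub-study spec names onto multi-study names, so spec 'd' is produced by StudyA's pipeline_alpha reading 'a' and 'b' (ss1's 'x' and 'y') via the translated getter pipeline_alpha_trans. A hedged sketch of requesting the derived data; the constructor signature and data-access method are assumed from the arcana API and may differ between versions:

multi = FullMultiStudy('example', repository, processor,
                       inputs=[...],  # selectors for 'a', 'b' and 'c'
                       parameters={'required_op': 'product'})
d = multi.data('d')  # runs pipeline_alpha_trans (and prerequisites) as needed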
Example #14
class TestInputValidationAnalysis(with_metaclass(AnalysisMetaClass, Analysis)):

    add_data_specs = [
        InputFilesetSpec('a', (test1_format, test2_format)),
        InputFilesetSpec('b', test3_format),
        FilesetSpec('c', test2_format, 'identity_pipeline'),
        FilesetSpec('d', test3_format, 'identity_pipeline')
    ]

    def identity_pipeline(self, **name_maps):
        pipeline = self.new_pipeline(
            name='pipeline',
            desc="A dummy pipeline used to test analysis input validation",
            citations=[],
            name_maps=name_maps)
        identity = pipeline.add('identity', IdentityInterface(['a', 'b']))
        pipeline.connect_input('a', identity, 'a')
        pipeline.connect_input('b', identity, 'b')
        pipeline.connect_output('c', identity, 'a')
        pipeline.connect_output('d', identity, 'b')
        return pipeline
Example #15
class RequirementsAnalysis(with_metaclass(AnalysisMetaClass, Analysis)):

    add_data_specs = [
        InputFilesetSpec('ones', text_format),
        FilesetSpec('twos', text_format, 'pipeline1'),
        FieldSpec('threes', float, 'pipeline2'),
        FieldSpec('fours', float, 'pipeline2')
    ]

    def pipeline1(self, **name_maps):
        pipeline = self.new_pipeline(
            name='pipeline1',
            desc=("A pipeline that tests loading of requirements"),
            name_maps=name_maps)
        # TestMath node that exercises loading of the listed requirements
        maths = pipeline.add(
            "maths",
            TestMathWithReq(),
            requirements=[first_req.v('0.15.9'),
                          second_req.v('1.0.2')])
        maths.inputs.op = 'add'
        maths.inputs.as_file = True
        maths.inputs.y = 1
        pipeline.connect_input('ones', maths, 'x', text_format)
        pipeline.connect_output('twos', maths, 'z', text_format)
        return pipeline

    def pipeline2(self, **name_maps):
        pipeline = self.new_pipeline(
            name='pipeline2',
            desc=("A pipeline that tests loading of requirements in "
                  "map nodes"),
            name_maps=name_maps)
        # Merge the two inputs into a list to iterate over in the map node
        merge = pipeline.add("merge", Merge(2))
        maths = pipeline.add(
            "maths",
            TestMathWithReq(),
            iterfield='x',
            requirements=[first_req.v('0.15.9'),
                          second_req.v('1.0.2')])
        split = pipeline.add('split', Split())
        split.inputs.splits = [1, 1]
        split.inputs.squeeze = True
        maths.inputs.op = 'add'
        maths.inputs.y = 2
        pipeline.connect_input('ones', merge, 'in1', text_format)
        pipeline.connect_input('twos', merge, 'in2', text_format)
        pipeline.connect(merge, 'out', maths, 'x')
        pipeline.connect(maths, 'z', split, 'inlist')
        pipeline.connect_output('threes', split, 'out1', text_format)
        pipeline.connect_output('fours', split, 'out2', text_format)
        return pipeline
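
Merge and Split are plain nipype utility interfaces, so the list plumbing around the map node can be exercised on its own:

from nipype.interfaces.utility import Merge, Split

# Merge packs its numbered inputs into a single list
merge = Merge(2)
merge.inputs.in1 = 1
merge.inputs.in2 = 2
merged = merge.run().outputs.out  # [1, 2]

# Split slices the list back into numbered outputs; squeeze=True
# unwraps the resulting single-element sublists
split = Split()
split.inputs.inlist = merged
split.inputs.splits = [1, 1]
split.inputs.squeeze = True
res = split.run().outputs  # res.out1 == 1, res.out2 == 2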
Example #16
class PartialMultiStudy(with_metaclass(MultiStudyMetaClass, MultiStudy)):

    add_substudy_specs = [
        SubStudySpec('ss1', StudyA, {
            'x': 'a',
            'y': 'b',
            'o1': 'p1'
        }),
        SubStudySpec('ss2', StudyB, {
            'w': 'b',
            'x': 'c',
            'o1': 'p1'
        })
    ]

    add_data_specs = [
        InputFilesetSpec('a', text_format),
        InputFilesetSpec('b', text_format),
        InputFilesetSpec('c', text_format)
    ]

    pipeline_alpha_trans = MultiStudy.translate('ss1', 'pipeline_alpha')

    add_param_specs = [ParamSpec('p1', 1000)]
Example #17
class PartialMultiAnalysis(MultiAnalysis, metaclass=MultiAnalysisMetaClass):

    add_subcomp_specs = [
        SubCompSpec('ss1', AnalysisA, {
            'x': 'a',
            'y': 'b',
            'o1': 'p1'
        }),
        SubCompSpec('ss2', AnalysisB, {
            'w': 'b',
            'x': 'c',
            'o1': 'p1'
        })
    ]

    add_data_specs = [
        InputFilesetSpec('a', text_format),
        InputFilesetSpec('b', text_format),
        InputFilesetSpec('c', text_format)
    ]

    pipeline_alpha_trans = MultiAnalysis.translate('ss1', 'pipeline_alpha')

    add_param_specs = [ParamSpec('p1', 1000)]
Example #18
class DummyStudy(Study, metaclass=StudyMetaClass):

    add_data_specs = [
        InputFilesetSpec('input_fileset', dicom_format),
        FilesetSpec('output_fileset', nifti_gz_format, 'a_pipeline')
    ]

    def a_pipeline(self):
        pipeline = self.new_pipeline(
            name='a_pipeline',
            desc=("A dummy pipeline used to test dicom-to-nifti "
                  "conversion method"),
            citations=[])
        identity = pipeline.add('identity', IdentityInterface(['field']))
        # Connect inputs
        pipeline.connect_input('input_fileset', identity, 'field')
        # Connect outputs
        pipeline.connect_output('output_fileset', identity, 'field')
        return pipeline
Example #19
class ExistingPrereqAnalysis(with_metaclass(AnalysisMetaClass, Analysis)):

    add_data_specs = [
        InputFilesetSpec('one', text_format),
        FilesetSpec('ten', text_format, 'ten_pipeline'),
        FilesetSpec('hundred', text_format, 'hundred_pipeline'),
        FilesetSpec('thousand', text_format, 'thousand_pipeline')
    ]

    def pipeline_factory(self, incr, input, output, name_maps):
        pipeline = self.new_pipeline(
            name=output + '_pipeline',
            desc="A dummy pipeline used to test 'partial-complete' method",
            citations=[],
            name_maps=name_maps)
        # Nodes
        math = pipeline.add("math", TestMath())
        math.inputs.y = incr
        math.inputs.op = 'add'
        math.inputs.as_file = True
        # Connect inputs
        pipeline.connect_input(input, math, 'x')
        # Connect outputs
        pipeline.connect_output(output, math, 'z')
        return pipeline

    def ten_pipeline(self, **name_maps):
        return self.pipeline_factory(10, 'one', 'ten', name_maps=name_maps)

    def hundred_pipeline(self, **name_maps):
        return self.pipeline_factory(100,
                                     'ten',
                                     'hundred',
                                     name_maps=name_maps)

    def thousand_pipeline(self, **name_maps):
        return self.pipeline_factory(1000,
                                     'hundred',
                                     'thousand',
                                     name_maps=name_maps)
Example #20
class BasicTestAnalysis(with_metaclass(AnalysisMetaClass, Analysis)):

    add_data_specs = [
        InputFilesetSpec('fileset', text_format),
        FilesetSpec('out_fileset', text_format, 'a_pipeline'),
        FilesetSpec('raise_error', text_format, 'raise_error_pipeline')
    ]

    def a_pipeline(self, **name_maps):

        pipeline = self.new_pipeline('a_pipeline',
                                     desc='a dummy pipeline',
                                     citations=[],
                                     name_maps=name_maps)

        pipeline.add('ident',
                     IdentityInterface(['fileset']),
                     inputs={'fileset': ('fileset', text_format)},
                     outputs={'out_fileset': ('fileset', text_format)})

        return pipeline

    def raise_error_pipeline(self, **name_maps):

        pipeline = self.new_pipeline(
            'raise_error_pipeline',
            desc='a pipeline that always throws an error',
            citations=[],
            name_maps=name_maps)

        pipeline.add('error',
                     ErrorInterface(),
                     inputs={'in_file': ('fileset', text_format)},
                     outputs={'raise_error': ('out_file', text_format)})

        return pipeline
Example #21
class T1Study(T2Study, metaclass=StudyMetaClass):

    desc = "T1-weighted MRI contrast"

    add_data_specs = [
        FilesetSpec('fs_recon_all', zip_format, 'freesurfer_pipeline'),
        InputFilesetSpec(
            't2_coreg',
            STD_IMAGE_FORMATS,
            optional=True,
            desc=("A coregistered T2 image to use in freesurfer to help "
                  "distinguish the peel surface")),
        # Templates
        InputFilesetSpec('suit_mask',
                         STD_IMAGE_FORMATS,
                         frequency='per_study',
                         default=LocalReferenceData('SUIT', nifti_format)),
        FilesetSpec('five_tissue_type',
                    mrtrix_image_format,
                    'gen_5tt_pipeline',
                    desc=("A segmentation image taken from freesurfer output "
                          "and simplified into 5 tissue types. Used in ACT "
                          "streamlines tractography"))
    ] + [
        FilesetSpec('aparc_stats_{}_{}_table'.format(h, m),
                    text_format,
                    'aparc_stats_table_pipeline',
                    frequency='per_visit',
                    pipeline_args={
                        'hemisphere': h,
                        'measure': m
                    },
                    desc=("Table of {} of {} per parcellated segment".format(
                        m, h.upper())))
        for h, m in itertools.product(
            ('lh', 'rh'), ('volume', 'thickness', 'thicknessstd', 'meancurv',
                           'gauscurv', 'foldind', 'curvind'))
    ]

    add_param_specs = [
        # MriStudy.param_spec('bet_method').with_new_choices(default='opti_bet'),
        SwitchSpec('bet_robust', False),
        SwitchSpec('bet_reduce_bias', True),
        SwitchSpec('aparc_atlas',
                   'desikan-killiany',
                   choices=('desikan-killiany', 'destrieux', 'DKT')),
        ParamSpec('bet_f_threshold', 0.1),
        ParamSpec('bet_g_threshold', 0.0)
    ]

    default_bids_inputs = [
        BidsInputs(spec_name='magnitude',
                   type='T1w',
                   valid_formats=(nifti_gz_x_format, nifti_gz_format))
    ]

    primary_scan_name = 'magnitude'

    def freesurfer_pipeline(self, **name_maps):
        """
        Segments grey matter, white matter and CSF from T1 images using
        SPM "NewSegment" function.

        NB: Default values come from the W2MHS toolbox
        """
        pipeline = self.new_pipeline(name='segmentation',
                                     name_maps=name_maps,
                                     desc="Segment white/grey matter and csf",
                                     citations=copy(freesurfer_cites))

        # FS ReconAll node
        recon_all = pipeline.add(
            'recon_all',
            interface=ReconAll(directive='all',
                               openmp=self.processor.num_processes),
            inputs={'T1_files': (self.preproc_spec_name, nifti_gz_format)},
            requirements=[freesurfer_req.v('5.3')],
            wall_time=2000)

        if self.provided('t2_coreg'):
            pipeline.connect_input('t2_coreg', recon_all, 'T2_file',
                                   nifti_gz_format)
            recon_all.inputs.use_T2 = True

        # Wrapper around os.path.join
        pipeline.add('join',
                     JoinPath(),
                     inputs={
                         'dirname': (recon_all, 'subjects_dir'),
                         'filename': (recon_all, 'subject_id')
                     },
                     outputs={'fs_recon_all': ('path', directory_format)})

        return pipeline

    def segmentation_pipeline(self, **name_maps):
        pipeline = super(T1Study, self).segmentation_pipeline(img_type=1,
                                                              **name_maps)
        return pipeline

    def gen_5tt_pipeline(self, **name_maps):

        pipeline = self.new_pipeline(
            name='gen5tt',
            name_maps=name_maps,
            desc=("Generate 5-tissue-type image used for Anatomically-"
                  "Constrained Tractography (ACT)"))

        aseg_path = pipeline.add(
            'aseg_path',
            AppendPath(sub_paths=['mri', 'aseg.mgz']),
            inputs={'base_path': ('fs_recon_all', directory_format)})

        pipeline.add(
            'gen5tt',
            mrtrix3.Generate5tt(algorithm='freesurfer', out_file='5tt.mif'),
            inputs={'in_file': (aseg_path, 'out_path')},
            outputs={'five_tissue_type': ('out_file', mrtrix_image_format)},
            requirements=[mrtrix_req.v('3.0rc3'),
                          freesurfer_req.v('6.0')])

        return pipeline

    def aparc_stats_table_pipeline(self, measure, hemisphere, **name_maps):

        pipeline = self.new_pipeline(
            name='aparc_stats_{}_{}'.format(hemisphere, measure),
            name_maps=name_maps,
            desc=("Extract statistics from freesurfer outputs"))

        copy_to_dir = pipeline.add('copy_to_subjects_dir',
                                   CopyToDir(),
                                   inputs={
                                       'in_files':
                                       ('fs_recon_all', directory_format),
                                       'file_names': (self.SUBJECT_ID, int)
                                   },
                                   joinsource=self.SUBJECT_ID,
                                   joinfield=['in_files', 'file_names'])

        if self.branch('aparc_atlas', 'desikan-killiany'):
            parc = 'aparc'
        elif self.branch('aparc_atlas', 'destrieux'):
            parc = 'aparc.a2009s'
        elif self.branch('aparc_atlas', 'DKT'):
            parc = 'aparc.DKTatlas40'
        else:
            self.unhandled_branch('aparc_atlas')

        pipeline.add('aparc_stats',
                     AparcStats(measure=measure,
                                hemisphere=hemisphere,
                                parc=parc),
                     inputs={
                         'subjects_dir': (copy_to_dir, 'out_dir'),
                         'subjects': (copy_to_dir, 'file_names')
                     },
                     outputs={
                         'aparc_stats_{}_{}_table'.format(hemisphere, measure):
                         ('tablefile', text_format)
                     },
                     requirements=[freesurfer_req.v('5.3')])

        return pipeline
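
The list comprehension at the end of add_data_specs generates one table spec per hemisphere/measure pair, 14 in all; the naming scheme can be checked stand-alone:

import itertools

names = ['aparc_stats_{}_{}_table'.format(h, m)
         for h, m in itertools.product(
             ('lh', 'rh'),
             ('volume', 'thickness', 'thicknessstd', 'meancurv',
              'gauscurv', 'foldind', 'curvind'))]
assert len(names) == 14  # 2 hemispheres x 7 measures
assert names[0] == 'aparc_stats_lh_volume_table'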
Example #22
class T1Study(T2Study, metaclass=StudyMetaClass):

    desc = "T1-weighted MRI contrast"

    add_data_specs = [
        FilesetSpec('fs_recon_all', zip_format, 'freesurfer_pipeline'),
        InputFilesetSpec(
            't2_coreg',
            STD_IMAGE_FORMATS,
            optional=True,
            desc=("A coregistered T2 image to use in freesurfer to help "
                  "distinguish the peel surface")),
        # Templates
        InputFilesetSpec('suit_mask',
                         STD_IMAGE_FORMATS,
                         frequency='per_study',
                         default=LocalReferenceData('SUIT', nifti_format)),
        FilesetSpec('five_tissue_type',
                    mrtrix_image_format,
                    'gen_5tt_pipeline',
                    desc=("A segmentation image taken from freesurfer output "
                          "and simplified into 5 tissue types. Used in ACT "
                          "streamlines tractography"))
    ] + [
        FilesetSpec('aparc_stats_{}_{}_table'.format(h, m),
                    text_format,
                    'aparc_stats_table_pipeline',
                    frequency='per_visit',
                    pipeline_args={
                        'hemisphere': h,
                        'measure': m
                    },
                    desc=("Table of {} of {} per parcellated segment".format(
                        m, h.upper())))
        for h, m in itertools.product(
            ('lh', 'rh'), ('volume', 'thickness', 'thicknessstd', 'meancurv',
                           'gauscurv', 'foldind', 'curvind'))
    ]

    add_param_specs = [
        # MriStudy.param_spec('bet_method').with_new_choices(default='opti_bet'),
        SwitchSpec('bet_robust', False),
        SwitchSpec('bet_reduce_bias', True),
        SwitchSpec('aparc_atlas',
                   'desikan-killiany',
                   choices=('desikan-killiany', 'destrieux', 'DKT')),
        ParamSpec('bet_f_threshold', 0.1),
        ParamSpec('bet_g_threshold', 0.0)
    ]

    default_bids_inputs = [
        BidsInputs(spec_name='magnitude',
                   type='T1w',
                   valid_formats=(nifti_gz_x_format, nifti_gz_format))
    ]

    def freesurfer_pipeline(self, **name_maps):
        """
        Segments grey matter, white matter and CSF from T1 images using
        SPM "NewSegment" function.

        NB: Default values come from the W2MHS toolbox
        """
        pipeline = self.new_pipeline(name='segmentation',
                                     name_maps=name_maps,
                                     desc="Segment white/grey matter and csf",
                                     citations=copy(freesurfer_cites))

        # FS ReconAll node
        recon_all = pipeline.add(
            'recon_all',
            interface=ReconAll(directive='all',
                               openmp=self.processor.num_processes),
            inputs={'T1_files': ('mag_preproc', nifti_gz_format)},
            requirements=[freesurfer_req.v('5.3')],
            wall_time=2000)

        if self.provided('t2_coreg'):
            pipeline.connect_input('t2_coreg', recon_all, 'T2_file',
                                   nifti_gz_format)
            recon_all.inputs.use_T2 = True

        # Wrapper around os.path.join
        pipeline.add('join',
                     JoinPath(),
                     inputs={
                         'dirname': (recon_all, 'subjects_dir'),
                         'filename': (recon_all, 'subject_id')
                     },
                     outputs={'fs_recon_all': ('path', directory_format)})

        return pipeline

    def segmentation_pipeline(self, **name_maps):
        pipeline = super(T1Study, self).segmentation_pipeline(img_type=1,
                                                              **name_maps)
        return pipeline

    def gen_5tt_pipeline(self, **name_maps):

        pipeline = self.new_pipeline(
            name='gen5tt',
            name_maps=name_maps,
            desc=("Generate 5-tissue-type image used for Anatomically-"
                  "Constrained Tractography (ACT)"))

        aseg_path = pipeline.add(
            'aseg_path',
            AppendPath(sub_paths=['mri', 'aseg.mgz']),
            inputs={'base_path': ('fs_recon_all', directory_format)})

        pipeline.add(
            'gen5tt',
            mrtrix3.Generate5tt(algorithm='freesurfer', out_file='5tt.mif'),
            inputs={'in_file': (aseg_path, 'out_path')},
            outputs={'five_tissue_type': ('out_file', mrtrix_image_format)},
            requirements=[mrtrix_req.v('3.0rc3'),
                          freesurfer_req.v('6.0')])

        return pipeline

    def aparc_stats_table_pipeline(self, measure, hemisphere, **name_maps):

        pipeline = self.new_pipeline(
            name='aparc_stats_{}_{}'.format(hemisphere, measure),
            name_maps=name_maps,
            desc=("Extract statistics from freesurfer outputs"))

        copy_to_dir = pipeline.add('copy_to_subjects_dir',
                                   CopyToDir(),
                                   inputs={
                                       'in_files':
                                       ('fs_recon_all', directory_format),
                                       'file_names': (self.SUBJECT_ID, int)
                                   },
                                   joinsource=self.SUBJECT_ID,
                                   joinfield=['in_files', 'file_names'])

        if self.branch('aparc_atlas', 'desikan-killiany'):
            parc = 'aparc'
        elif self.branch('aparc_atlas', 'destrieux'):
            parc = 'aparc.a2009s'
        elif self.branch('aparc_atlas', 'DKT'):
            parc = 'aparc.DKTatlas40'
        else:
            self.unhandled_branch('aparc_atlas')

        pipeline.add('aparc_stats',
                     AparcStats(measure=measure,
                                hemisphere=hemisphere,
                                parc=parc),
                     inputs={
                         'subjects_dir': (copy_to_dir, 'out_dir'),
                         'subjects': (copy_to_dir, 'file_names')
                     },
                     outputs={
                         'aparc_stats_{}_{}_table'.format(hemisphere, measure):
                         ('tablefile', text_format)
                     },
                     requirements=[freesurfer_req.v('5.3')])

        return pipeline

    def bet_T1(self, **name_maps):

        pipeline = self.new_pipeline(
            name='BET_T1',
            name_maps=name_maps,
            desc=("Brain extraction pipeline using FSL's BET"),
            citations=[fsl_cite])

        bias = pipeline.add('n4_bias_correction',
                            ants.N4BiasFieldCorrection(),
                            inputs={'input_image': ('t1', nifti_gz_format)},
                            requirements=[ants_req.v('1.9')],
                            wall_time=60,
                            mem_gb=12)

        pipeline.add('bet',
                     fsl.BET(frac=0.15,
                             reduce_bias=True,
                             output_type='NIFTI_GZ'),
                     inputs={'in_file': (bias, 'output_image')},
                     outputs={
                         'betted_T1': ('out_file', nifti_gz_format),
                         'betted_T1_mask': ('mask_file', nifti_gz_format)
                     },
                     requirements=[fsl_req.v('5.0.8')],
                     mem_gb=8,
                     wall_time=45)

        return pipeline

    def cet_T1(self, **name_maps):
        pipeline = self.new_pipeline(
            name='CET_T1',
            name_maps=name_maps,
            desc=("Construct cerebellum mask using SUIT template"),
            citations=[fsl_cite])

        # FIXME: Should convert to inputs
        nl = self._lookup_nl_tfm_inv_name('MNI')
        linear = self._lookup_l_tfm_to_name('MNI')

        # Initially use MNI space to warp SUIT into T1 and threshold to mask
        merge_trans = pipeline.add('merge_transforms',
                                   Merge(2),
                                   inputs={
                                       'in2': (nl, nifti_gz_format),
                                       'in1': (linear, nifti_gz_format)
                                   })

        apply_trans = pipeline.add('ApplyTransform',
                                   ants.resampling.ApplyTransforms(
                                       interpolation='NearestNeighbor',
                                       input_image_type=3,
                                       invert_transform_flags=[True, False]),
                                   inputs={
                                       'reference_image':
                                       ('betted_T1', nifti_gz_format),
                                       'input_image':
                                       ('suit_mask', nifti_gz_format),
                                       'transforms': (merge_trans, 'out')
                                   },
                                   outputs={
                                       'cetted_T1_mask':
                                       ('output_image', nifti_gz_format)
                                   },
                                   requirements=[ants_req.v('1.9')],
                                   mem_gb=16,
                                   wall_time=120)

        pipeline.add('maths2',
                     fsl.utils.ImageMaths(suffix='_optiBET_cerebellum',
                                          op_string='-mas'),
                     inputs={
                         'in_file': ('betted_T1', nifti_gz_format),
                         'in_file2': (apply_trans, 'output_image')
                     },
                     outputs={'cetted_T1': ('out_file', nifti_gz_format)},
                     requirements=[fsl_req.v('5.0.8')],
                     mem_gb=16,
                     wall_time=5)

        return pipeline
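
bet_T1 chains ANTs N4 bias correction into FSL's BET. For reference, the BET step can be run stand-alone through nipype; a sketch assuming FSL is installed, with mask=True added so the mask_file output is actually generated:

from nipype.interfaces import fsl

bet = fsl.BET(in_file='T1_n4.nii.gz',  # hypothetical bias-corrected input
              frac=0.15, reduce_bias=True, mask=True,
              output_type='NIFTI_GZ')
result = bet.run()
# result.outputs.out_file  -> brain-extracted image
# result.outputs.mask_file -> binary brain mask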
Example #23
class T2starStudy(MriStudy, metaclass=StudyMetaClass):

    desc = "T2*-weighted MRI contrast"

    add_data_specs = [
        # Set the magnitude to be generated from the preprocess_channels
        # pipeline
        FilesetSpec('magnitude',
                    nifti_gz_format,
                    'preprocess_channels_pipeline',
                    desc=("Generated from separate channel signals, "
                          "provided to 'channels'.")),
        # QSM and phase processing
        FilesetSpec('swi', nifti_gz_format, 'swi_pipeline'),
        FilesetSpec('qsm',
                    nifti_gz_format,
                    'qsm_pipeline',
                    desc=("Quantitative susceptibility image resolved "
                          "from T2* coil images")),
        # Vein analysis
        FilesetSpec('composite_vein_image', nifti_gz_format, 'cv_pipeline'),
        FilesetSpec('vein_mask', nifti_gz_format, 'shmrf_pipeline'),
        # Templates
        InputFilesetSpec('mni_template_qsm_prior',
                         STD_IMAGE_FORMATS,
                         frequency='per_study',
                         default=LocalReferenceData('QSMPrior',
                                                    nifti_gz_format)),
        InputFilesetSpec('mni_template_swi_prior',
                         STD_IMAGE_FORMATS,
                         frequency='per_study',
                         default=LocalReferenceData('SWIPrior',
                                                    nifti_gz_format)),
        InputFilesetSpec('mni_template_atlas_prior',
                         STD_IMAGE_FORMATS,
                         frequency='per_study',
                         default=LocalReferenceData('VeinFrequencyPrior',
                                                    nifti_gz_format)),
        InputFilesetSpec('mni_template_vein_atlas',
                         STD_IMAGE_FORMATS,
                         frequency='per_study',
                         default=LocalReferenceData('VeinFrequencyMap',
                                                    nifti_gz_format))
    ]

    add_param_specs = [
        SwitchSpec('qsm_dual_echo', False),
        ParamSpec('qsm_echo',
                  1,
                  desc=("Which echo (by index starting at 1) to use when "
                        "using single echo")),
        ParamSpec('qsm_padding', [12, 12, 12]),
        ParamSpec('qsm_mask_dialation', [11, 11, 11]),
        ParamSpec('qsm_erosion_size', 10),
        SwitchSpec('bet_robust', False),
        ParamSpec('bet_f_threshold', 0.1),
        ParamSpec('bet_g_threshold', 0.0)
    ]

    def preprocess_channels_pipeline(self, **name_maps):
        pipeline = super().preprocess_channels_pipeline(**name_maps)
        # Connect combined first echo output to the magnitude data spec
        pipeline.connect_output('magnitude', pipeline.node('to_polar'),
                                'first_echo', nifti_gz_format)
        return pipeline

    def qsm_pipeline(self, **name_maps):
        """
        Process dual echo data for QSM (TE=[7.38, 22.14])

        NB: Default values come from the STI-Suite
        """
        pipeline = self.new_pipeline(
            name='qsm_pipeline',
            name_maps=name_maps,
            desc="Resolve QSM from t2star coils",
            citations=[sti_cites, fsl_cite, matlab_cite])

        erosion = pipeline.add(
            'mask_erosion',
            fsl.ErodeImage(kernel_shape='sphere',
                           kernel_size=self.parameter('qsm_erosion_size'),
                           output_type='NIFTI'),
            inputs={'in_file': ('brain_mask', nifti_gz_format)},
            requirements=[fsl_req.v('5.0.8')],
            wall_time=15,
            mem_gb=12)

        # If there are multiple echoes, the phase images from each channel
        # can be combined into a single image. Otherwise, for single-echo
        # sequences, QSM must be performed on each coil separately and the
        # results combined afterwards.
        if self.branch('qsm_dual_echo'):
            # Combine channels to produce phase and magnitude images
            channel_combine = pipeline.add(
                'channel_combine',
                HIPCombineChannels(),
                inputs={
                    'magnitudes_dir': ('mag_channels', multi_nifti_gz_format),
                    'phases_dir': ('phase_channels', multi_nifti_gz_format)
                })

            # Unwrap phase using Laplacian unwrapping
            unwrap = pipeline.add(
                'unwrap',
                UnwrapPhase(padsize=self.parameter('qsm_padding')),
                inputs={
                    'voxelsize': ('voxel_sizes', float),
                    'in_file': (channel_combine, 'phase')
                },
                requirements=[matlab_req.v('r2017a'),
                              sti_req.v(2.2)])

            # Remove background noise
            vsharp = pipeline.add(
                "vsharp",
                VSharp(mask_manip="imerode({}>0, ball(5))"),
                inputs={
                    'voxelsize': ('voxel_sizes', float),
                    'in_file': (unwrap, 'out_file'),
                    'mask': (erosion, 'out_file')
                },
                requirements=[matlab_req.v('r2017a'),
                              sti_req.v(2.2)])

            # Run QSM iLSQR
            pipeline.add('qsmrecon',
                         QSMiLSQR(mask_manip="{}>0",
                                  padsize=self.parameter('qsm_padding')),
                         inputs={
                             'voxelsize': ('voxel_sizes', float),
                             'te': ('echo_times', float),
                             'B0': ('main_field_strength', float),
                             'H': ('main_field_orient', float),
                             'in_file': (vsharp, 'out_file'),
                             'mask': (vsharp, 'new_mask')
                         },
                         outputs={'qsm': ('qsm', nifti_format)},
                         requirements=[matlab_req.v('r2017a'),
                                       sti_req.v(2.2)])

        else:
            # Dilate the eroded mask
            dialate = pipeline.add(
                'dialate',
                DialateMask(dialation=self.parameter('qsm_mask_dialation')),
                inputs={'in_file': (erosion, 'out_file')},
                requirements=[matlab_req.v('r2017a')])

            # List the phase images for each separate channel
            list_phases = pipeline.add(
                'list_phases',
                ListDir(sort_key=coil_sort_key,
                        filter=CoilEchoFilter(self.parameter('qsm_echo'))),
                inputs={
                    'directory': ('phase_channels', multi_nifti_gz_format)
                })

            # List the magnitude images for each separate channel
            list_mags = pipeline.add(
                'list_mags',
                ListDir(sort_key=coil_sort_key,
                        filter=CoilEchoFilter(self.parameter('qsm_echo'))),
                inputs={'directory': ('mag_channels', multi_nifti_gz_format)})

            # Generate coil specific masks
            mask_coils = pipeline.add(
                'mask_coils',
                MaskCoils(dialation=self.parameter('qsm_mask_dialation')),
                inputs={
                    'masks': (list_mags, 'files'),
                    'whole_brain_mask': (dialate, 'out_file')
                },
                requirements=[matlab_req.v('r2017a')])

            # Unwrap phase
            unwrap = pipeline.add(
                'unwrap',
                BatchUnwrapPhase(padsize=self.parameter('qsm_padding')),
                inputs={
                    'voxelsize': ('voxel_sizes', float),
                    'in_file': (list_phases, 'files')
                },
                requirements=[matlab_req.v('r2017a'),
                              sti_req.v(2.2)])

            # Background phase removal
            vsharp = pipeline.add(
                "vsharp",
                BatchVSharp(mask_manip='{}>0'),
                inputs={
                    'voxelsize': ('voxel_sizes', float),
                    'mask': (mask_coils, 'out_files'),
                    'in_file': (unwrap, 'out_file')
                },
                requirements=[matlab_req.v('r2017a'),
                              sti_req.v(2.2)])

            first_echo_time = pipeline.add(
                'first_echo',
                Select(index=0),
                inputs={'inlist': ('echo_times', float)})

            # Perform channel-wise QSM
            coil_qsm = pipeline.add(
                'coil_qsmrecon',
                BatchQSMiLSQR(mask_manip="{}>0",
                              padsize=self.parameter('qsm_padding')),
                inputs={
                    'voxelsize': ('voxel_sizes', float),
                    'B0': ('main_field_strength', float),
                    'H': ('main_field_orient', float),
                    'in_file': (vsharp, 'out_file'),
                    'mask': (vsharp, 'new_mask'),
                    'te': (first_echo_time, 'out')
                },
                requirements=[matlab_req.v('r2017a'),
                              sti_req.v(2.2)],
                wall_time=45)  # FIXME: Should be dependent on number of coils

            # Combine channel QSM by taking the median coil value
            pipeline.add('combine_qsm',
                         MedianInMasks(),
                         inputs={
                             'channels': (coil_qsm, 'out_file'),
                             'channel_masks': (vsharp, 'new_mask'),
                             'whole_brain_mask': (dialate, 'out_file')
                         },
                         outputs={'qsm': ('out_file', nifti_format)},
                         requirements=[matlab_req.v('r2017a')])
        return pipeline

    def swi_pipeline(self, **name_maps):

        # FIXME: SWI calculation not yet implemented; the pipeline
        # construction below is unreachable until it is
        raise NotImplementedError

        pipeline = self.new_pipeline(
            name='swi',
            name_maps=name_maps,
            desc=("Calculate susceptibility-weighted image from magnitude and "
                  "phase"))

        return pipeline

    def cv_pipeline(self, **name_maps):

        pipeline = self.new_pipeline(name='cv_pipeline',
                                     name_maps=name_maps,
                                     desc="Compute Composite Vein Image",
                                     citations=[fsl_cite, matlab_cite])

        # Interpolate priors and atlas
        merge_trans = pipeline.add('merge_transforms',
                                   Merge(3),
                                   inputs={
                                       'in1':
                                       ('coreg_ants_mat', text_matrix_format),
                                       'in2': ('coreg_to_tmpl_ants_mat',
                                               text_matrix_format),
                                       'in3': ('coreg_to_tmpl_ants_warp',
                                               nifti_gz_format)
                                   })

        apply_trans_q = pipeline.add(
            'ApplyTransform_Q_Prior',
            ants.resampling.ApplyTransforms(
                interpolation='Linear',
                input_image_type=3,
                invert_transform_flags=[True, True, False]),
            inputs={
                'input_image': ('mni_template_qsm_prior', nifti_gz_format),
                'reference_image': ('qsm', nifti_gz_format),
                'transforms': (merge_trans, 'out')
            },
            requirements=[ants_req.v('1.9')],
            mem_gb=16,
            wall_time=30)

        apply_trans_s = pipeline.add(
            'ApplyTransform_S_Prior',
            ants.resampling.ApplyTransforms(
                interpolation='Linear',
                input_image_type=3,
                invert_transform_flags=[True, True, False]),
            inputs={
                'input_image': ('mni_template_swi_prior', nifti_gz_format),
                'reference_image': ('qsm', nifti_gz_format),
                'transforms': (merge_trans, 'out')
            },
            requirements=[ants_req.v('1.9')],
            mem_gb=16,
            wall_time=30)

        apply_trans_a = pipeline.add(
            'ApplyTransform_A_Prior',
            ants.resampling.ApplyTransforms(
                interpolation='Linear',
                input_image_type=3,
                invert_transform_flags=[True, True, False],
            ),
            inputs={
                'reference_image': ('qsm', nifti_gz_format),
                'input_image': ('mni_template_atlas_prior', nifti_gz_format),
                'transforms': (merge_trans, 'out')
            },
            requirements=[ants_req.v('1.9')],
            mem_gb=16,
            wall_time=30)

        apply_trans_v = pipeline.add(
            'ApplyTransform_V_Atlas',
            ants.resampling.ApplyTransforms(
                interpolation='Linear',
                input_image_type=3,
                invert_transform_flags=[True, True, False]),
            inputs={
                'input_image': ('mni_template_vein_atlas', nifti_gz_format),
                'reference_image': ('qsm', nifti_gz_format),
                'transforms': (merge_trans, 'out')
            },
            requirements=[ants_req.v('1.9')],
            mem_gb=16,
            wall_time=30)

        # Run CV code
        pipeline.add(
            'cv_image',
            interface=CompositeVeinImage(),
            inputs={
                'mask': ('brain_mask', nifti_format),
                'qsm': ('qsm', nifti_format),
                'swi': ('swi', nifti_format),
                'q_prior': (apply_trans_q, 'output_image'),
                's_prior': (apply_trans_s, 'output_image'),
                'a_prior': (apply_trans_a, 'output_image'),
                'vein_atlas': (apply_trans_v, 'output_image')
            },
            outputs={'composite_vein_image': ('out_file', nifti_format)},
            requirements=[matlab_req.v('R2015a')],
            wall_time=300,
            mem_gb=24)

        return pipeline

    def shmrf_pipeline(self, **name_maps):

        pipeline = self.new_pipeline(name='shmrf_pipeline',
                                     name_maps=name_maps,
                                     desc="Compute Vein Mask using ShMRF",
                                     citations=[fsl_cite, matlab_cite])

        # Run ShMRF code
        pipeline.add('shmrf',
                     ShMRF(),
                     inputs={
                         'in_file': ('composite_vein_image', nifti_format),
                         'mask': ('brain_mask', nifti_format)
                     },
                     outputs={'vein_mask': ('out_file', nifti_format)},
                     requirements=[matlab_req.v('R2015a')],
                     wall_time=30,
                     mem_gb=16)

        return pipeline

    def cet_T2s(self, **options):

        pipeline = self.new_pipeline(
            name='CET_T2s',
            desc=("Construct cerebellum mask using SUIT template"),
            default_options={
                'SUIT_mask': self._lookup_template_mask_path('SUIT')
            },
            citations=[fsl_cite],
            options=options)

        # Initially use MNI space to warp SUIT mask into T2s space
        merge_trans = pipeline.add(
            'merge_transforms',
            Merge(3),
            inputs={
                'in3': (self._lookup_nl_tfm_inv_name('SUIT'), nifti_gz_format),
                'in2': (self._lookup_l_tfm_to_name('SUIT'), nifti_gz_format),
                'in1': ('T2s_to_T1_mat', text_matrix_format)
            })

        apply_trans = pipeline.add(
            'ApplyTransform',
            ants.resampling.ApplyTransforms(
                interpolation='NearestNeighbor',
                input_image_type=3,
                invert_transform_flags=[True, True, False],
                input_image=pipeline.option('SUIT_mask')),
            inputs={
                'transforms': (merge_trans, 'out'),
                'reference_image': ('betted_T2s', nifti_gz_format)
            },
            outputs={'cetted_T2s_mask': ('output_image', nifti_gz_format)},
            requirements=[ants_req.v('1.9')],
            mem_gb=16,
            wall_time=120)

        # Combine masks
        maths1 = pipeline.add('combine_masks',
                              fsl.utils.ImageMaths(suffix='_optiBET_masks',
                                                   op_string='-mas',
                                                   output_type='NIFTI_GZ'),
                              inputs={
                                  'in_file':
                                  ('betted_T2s_mask', nifti_gz_format),
                                  'in_file2': (apply_trans, 'output_image')
                              },
                              requirements=[fsl_req.v('5.0.8')],
                              mem_gb=16,
                              wall_time=5)

        # Mask out t2s image
        pipeline.add('mask_t2s',
                     fsl.utils.ImageMaths(suffix='_optiBET_cerebellum',
                                          op_string='-mas',
                                          output_type='NIFTI_GZ'),
                     inputs={
                         'in_file': ('betted_T2s', nifti_gz_format),
                         'in_file2': (maths1, 'out_file')
                     },
                     outputs={'cetted_T2s': ('out_file', nifti_gz_format)},
                     requirements=[fsl_req.v('5.0.8')],
                     mem_gb=16,
                     wall_time=5)

        pipeline.add(
            'mask_t2s_last_echo',
            fsl.utils.ImageMaths(suffix='_optiBET_cerebellum',
                                 op_string='-mas',
                                 output_type='NIFTI_GZ'),
            inputs={
                'in_file': ('betted_T2s_last_echo', nifti_gz_format),
                'in_file2': (maths1, 'out_file')
            },
            outputs={'cetted_T2s_last_echo': ('out_file', nifti_gz_format)},
            requirements=[fsl_req.v('5.0.8')],
            mem_gb=16,
            wall_time=5)

        return pipeline
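
    # For reference, ImageMaths with op_string='-mas' wraps the fslmaths
    # masking operation (voxelwise multiplication by a binary mask). A
    # standalone sketch with hypothetical file names:
    #
    #     from nipype.interfaces import fsl
    #     mask_op = fsl.ImageMaths(op_string='-mas', output_type='NIFTI_GZ')
    #     mask_op.inputs.in_file = 'betted_T2s.nii.gz'
    #     mask_op.inputs.in_file2 = 'cerebellum_mask.nii.gz'
    #     result = mask_op.run()  # masked image at result.outputs.out_file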

    def bet_T2s(self, **options):

        pipeline = self.new_pipeline(name='BET_T2s',
                                     desc=("python implementation of BET"),
                                     default_options={},
                                     citations=[fsl_cite],
                                     options=options)

        bet = pipeline.add('bet',
                           fsl.BET(frac=0.1, mask=True,
                                   output_type='NIFTI_GZ'),
                           inputs={'in_file': ('t2s', nifti_gz_format)},
                           outputs={
                               'betted_T2s': ('out_file', nifti_gz_format),
                               'betted_T2s_mask':
                               ('mask_file', nifti_gz_format)
                           },
                           requirements=[fsl_req.v('5.0.8')],
                           mem_gb=8,
                           wall_time=45)

        pipeline.add(
            'mask',
            fsl.utils.ImageMaths(suffix='_BET_brain',
                                 op_string='-mas',
                                 output_type='NIFTI_GZ'),
            inputs={
                'in_file': ('t2s_last_echo', nifti_gz_format),
                'in_file2': (bet, 'mask_file')
            },
            outputs={'betted_T2s_last_echo': ('out_file', nifti_gz_format)},
            requirements=[fsl_req.v('5.0.8')],
            mem_gb=16,
            wall_time=5)

        return pipeline
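
    # Standalone equivalent of the BET node above (a sketch; the input file
    # name is hypothetical). frac=0.1 keeps considerably more tissue than
    # BET's default of 0.5, which suits the faint edges of T2* images:
    #
    #     from nipype.interfaces import fsl
    #     bet = fsl.BET(frac=0.1, mask=True, output_type='NIFTI_GZ')
    #     bet.inputs.in_file = 't2s.nii.gz'
    #     result = bet.run()  # result.outputs.out_file / .mask_file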
Ejemplo n.º 24
0
class MotionDetectionMixin(MultiStudy, metaclass=MultiStudyMetaClass):

    #     add_substudy_specs = [
    #         SubStudySpec('pet_mc', PetStudy)]

    add_data_specs = [
        InputFilesetSpec('pet_data_dir', directory_format, optional=True),
        InputFilesetSpec('pet_data_reconstructed',
                         directory_format,
                         optional=True),
        InputFilesetSpec('struct2align', nifti_gz_format, optional=True),
        InputFilesetSpec('umap', dicom_format, optional=True),
        FilesetSpec('pet_data_prepared', directory_format,
                    'prepare_pet_pipeline'),
        FilesetSpec('static_motion_correction_results', directory_format,
                    'motion_correction_pipeline'),
        FilesetSpec('dynamic_motion_correction_results', directory_format,
                    'motion_correction_pipeline'),
        FilesetSpec('mean_displacement', text_format,
                    'mean_displacement_pipeline'),
        FilesetSpec('mean_displacement_rc', text_format,
                    'mean_displacement_pipeline'),
        FilesetSpec('mean_displacement_consecutive', text_format,
                    'mean_displacement_pipeline'),
        FilesetSpec('mats4average', text_format, 'mean_displacement_pipeline'),
        FilesetSpec('start_times', text_format, 'mean_displacement_pipeline'),
        FilesetSpec('motion_par_rc', text_format,
                    'mean_displacement_pipeline'),
        FilesetSpec('motion_par', text_format, 'mean_displacement_pipeline'),
        FilesetSpec('offset_indexes', text_format,
                    'mean_displacement_pipeline'),
        FilesetSpec('severe_motion_detection_report', text_format,
                    'mean_displacement_pipeline'),
        FilesetSpec('frame_start_times', text_format,
                    'motion_framing_pipeline'),
        FilesetSpec('frame_vol_numbers', text_format,
                    'motion_framing_pipeline'),
        FilesetSpec('timestamps', directory_format, 'motion_framing_pipeline'),
        FilesetSpec('mean_displacement_plot', png_format,
                    'plot_mean_displacement_pipeline'),
        FilesetSpec('rotation_plot', png_format,
                    'plot_mean_displacement_pipeline'),
        FilesetSpec('translation_plot', png_format,
                    'plot_mean_displacement_pipeline'),
        FilesetSpec('average_mats', directory_format,
                    'frame_mean_transformation_mats_pipeline'),
        FilesetSpec('correction_factors', text_format,
                    'pet_correction_factors_pipeline'),
        FilesetSpec('umaps_align2ref', directory_format,
                    'umap_realignment_pipeline'),
        FilesetSpec('umap_aligned_dicoms', directory_format,
                    'nifti2dcm_conversion_pipeline'),
        FilesetSpec('motion_detection_output', directory_format,
                    'gather_outputs_pipeline'),
        FilesetSpec('moco_series', directory_format,
                    'create_moco_series_pipeline'),
        FilesetSpec('fixed_binning_mats', directory_format,
                    'fixed_binning_pipeline'),
        FieldSpec('pet_duration', int, 'pet_header_extraction_pipeline'),
        FieldSpec('pet_end_time', str, 'pet_header_extraction_pipeline'),
        FieldSpec('pet_start_time', str, 'pet_header_extraction_pipeline')
    ]

    add_param_specs = [
        ParamSpec('framing_th', 2.0),
        ParamSpec('framing_temporal_th', 30.0),
        ParamSpec('framing_duration', 0),
        ParamSpec('md_framing', True),
        ParamSpec('align_pct', False),
        ParamSpec('align_fixed_binning', False),
        ParamSpec('moco_template',
                  os.path.join(reference_path, 'moco_template.IMA')),
        ParamSpec('PET_template_MNI',
                  os.path.join(template_path, 'PET_template_MNI.nii.gz')),
        ParamSpec('fixed_binning_n_frames', 0),
        ParamSpec('pet_offset', 0),
        ParamSpec('fixed_binning_bin_len', 60),
        ParamSpec('crop_xmin', 100),
        ParamSpec('crop_xsize', 130),
        ParamSpec('crop_ymin', 100),
        ParamSpec('crop_ysize', 130),
        ParamSpec('crop_zmin', 20),
        ParamSpec('crop_zsize', 100),
        ParamSpec('PET2MNI_reg', False),
        ParamSpec('dynamic_pet_mc', False)
    ]
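
    # The parameters above can be overridden at construction time; a hedged
    # sketch assuming Arcana's standard Study signature (the class name,
    # repository and processor are placeholders):
    #
    #     study = MotionDetectionStudy(
    #         'md_study', repository=my_repo, processor=my_proc,
    #         parameters={'framing_th': 1.5, 'fixed_binning_bin_len': 120})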

    def mean_displacement_pipeline(self, **name_maps):

        pipeline = self.new_pipeline(
            name='mean_displacement_calculation',
            desc=("Calculate the mean displacement between each motion"
                  " matrix and a reference."),
            citations=[fsl_cite],
            name_maps=name_maps)

        motion_mats_in = {}
        tr_in = {}
        start_time_in = {}
        real_duration_in = {}
        merge_index = 1
        input_names = []
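        # Collect motion matrices, TR, start time and real duration from
        # each substudy that defines a 'motion_mats' spec, keying them as
        # 'in1', 'in2', ... to feed the variable-width Merge nodes below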
        for spec in self.substudy_specs():
            try:
                spec.map('motion_mats')
            except ArcanaNameError:
                pass  # Sub study doesn't have motion mats spec
            else:
                k = 'in{}'.format(merge_index)
                motion_mats_in[k] = (spec.map('motion_mats'),
                                     motion_mats_format)
                tr_in[k] = (spec.map('tr'), float)
                start_time_in[k] = (spec.map('start_time'), float)
                real_duration_in[k] = (spec.map('real_duration'), float)
                input_names.append(
                    self.spec(spec.map(
                        spec.study_class.primary_scan_name)).pattern)
                merge_index += 1

        merge_motion_mats = pipeline.add('merge_motion_mats',
                                         Merge(len(motion_mats_in)),
                                         inputs=motion_mats_in)

        merge_tr = pipeline.add('merge_tr', Merge(len(tr_in)), inputs=tr_in)

        merge_start_time = pipeline.add('merge_start_time',
                                        Merge(len(start_time_in)),
                                        inputs=start_time_in)

        merge_real_duration = pipeline.add('merge_real_duration',
                                           Merge(len(real_duration_in)),
                                           inputs=real_duration_in)

        pipeline.add(
            'scan_time_info',
            MeanDisplacementCalculation(input_names=input_names),
            inputs={
                'motion_mats': (merge_motion_mats, 'out'),
                'trs': (merge_tr, 'out'),
                'start_times': (merge_start_time, 'out'),
                'real_durations': (merge_real_duration, 'out'),
                'reference': ('ref_brain', nifti_gz_format)
            },
            outputs={
                'mean_displacement': ('mean_displacement', text_format),
                'mean_displacement_rc': ('mean_displacement_rc', text_format),
                'mean_displacement_consecutive':
                ('mean_displacement_consecutive', text_format),
                'start_times': ('start_times', text_format),
                'motion_par_rc': ('motion_parameters_rc', text_format),
                'motion_par': ('motion_parameters', text_format),
                'offset_indexes': ('offset_indexes', text_format),
                'mats4average': ('mats4average', text_format),
                'severe_motion_detection_report':
                ('corrupted_volumes', text_format)
            })

        return pipeline

    def motion_framing_pipeline(self, **name_maps):

        pipeline = self.new_pipeline(
            name='motion_framing',
            desc=("Calculate when the head movement exceeded a "
                  "predefined threshold (default 2mm)."),
            citations=[fsl_cite],
            name_maps=name_maps)

        framing = pipeline.add(
            'motion_framing',
            MotionFraming(
                motion_threshold=self.parameter('framing_th'),
                temporal_threshold=self.parameter('framing_temporal_th'),
                pet_offset=self.parameter('pet_offset'),
                pet_duration=self.parameter('framing_duration')),
            inputs={
                'mean_displacement': ('mean_displacement', text_format),
                'mean_displacement_consec':
                ('mean_displacement_consecutive', text_format),
                'start_times': ('start_times', text_format)
            },
            outputs={
                'frame_start_times': ('frame_start_times', text_format),
                'frame_vol_numbers': ('frame_vol_numbers', text_format),
                'timestamps': ('timestamps_dir', directory_format)
            })

        if 'pet_data_dir' in self.input_names:
            pipeline.connect_input('pet_start_time', framing, 'pet_start_time')
            pipeline.connect_input('pet_end_time', framing, 'pet_end_time')

        return pipeline

    def plot_mean_displacement_pipeline(self, **name_maps):

        pipeline = self.new_pipeline(
            name='plot_mean_displacement',
            desc=("Plot the mean displacement real clock"),
            citations=[fsl_cite],
            name_maps=name_maps)

        pipeline.add(
            'plot_md',
            PlotMeanDisplacementRC(framing=self.parameter('md_framing')),
            inputs={
                'mean_disp_rc': ('mean_displacement_rc', text_format),
                'false_indexes': ('offset_indexes', text_format),
                'frame_start_times': ('frame_start_times', text_format),
                'motion_par_rc': ('motion_par_rc', text_format)
            },
            outputs={
                'mean_displacement_plot': ('mean_disp_plot', png_format),
                'rotation_plot': ('rot_plot', png_format),
                'translation_plot': ('trans_plot', png_format)
            })

        return pipeline

    def frame_mean_transformation_mats_pipeline(self, **name_maps):

        pipeline = self.new_pipeline(
            name='frame_mean_transformation_mats',
            desc=("Average all the transformation mats within each "
                  "detected frame."),
            citations=[fsl_cite],
            name_maps=name_maps)

        pipeline.add(
            'mats_averaging',
            AffineMatAveraging(),
            inputs={
                'frame_vol_numbers': ('frame_vol_numbers', text_format),
                'all_mats4average': ('mats4average', text_format)
            },
            outputs={'average_mats': ('average_mats', directory_format)})

        return pipeline

    def fixed_binning_pipeline(self, **name_maps):

        pipeline = self.new_pipeline(
            name='fixed_binning',
            desc=("Pipeline to generate average motion matrices for "
                  "each bin in a dynamic PET reconstruction experiment."
                  "This will be the input for the dynamic motion correction."),
            citations=[fsl_cite],
            name_maps=name_maps)

        pipeline.add(
            'fixed_binning',
            FixedBinning(n_frames=self.parameter('fixed_binning_n_frames'),
                         pet_offset=self.parameter('pet_offset'),
                         bin_len=self.parameter('fixed_binning_bin_len')),
            inputs={
                'start_times': ('start_times', text_format),
                'pet_start_time': ('pet_start_time', str),
                'pet_duration': ('pet_duration', int),
                'motion_mats': ('mats4average', text_format)
            },
            outputs={
                'fixed_binning_mats': ('average_bin_mats', directory_format)
            })

        return pipeline
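
    # A worked example of the binning arithmetic (assuming bin_len is in
    # seconds, as the default of 60 suggests): with pet_offset=0 and
    # fixed_binning_n_frames=0 (i.e. the frame count is derived from the
    # scan length), a 30-minute dynamic acquisition yields 1800 / 60 = 30
    # bins, each assigned one averaged motion matrix.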

    def pet_correction_factors_pipeline(self, **name_maps):

        pipeline = self.new_pipeline(
            name='pet_correction_factors',
            desc=("Pipeline to calculate the correction factors to "
                  "account for frame duration when averaging the PET "
                  "frames to create the static PET image"),
            citations=[fsl_cite],
            name_maps=name_maps)

        pipeline.add(
            'pet_corr_factors',
            PetCorrectionFactor(),
            inputs={'timestamps': ('timestamps', directory_format)},
            outputs={'correction_factors': ('corr_factors', text_format)})

        return pipeline

    def nifti2dcm_conversion_pipeline(self, **name_maps):

        pipeline = self.new_pipeline(
            name='conversion_to_dicom',
            desc=("Conversing aligned umap from nifti to dicom format - "
                  "parallel implementation"),
            citations=(),
            name_maps=name_maps)

        list_niftis = pipeline.add(
            'list_niftis',
            ListDir(),
            inputs={'directory': ('umaps_align2ref', directory_format)})

        reorient_niftis = pipeline.add('reorient_niftis',
                                       ReorientUmap(),
                                       inputs={
                                           'niftis': (list_niftis, 'files'),
                                           'umap': ('umap', dicom_format)
                                       },
                                       requirements=[mrtrix_req.v('3.0rc3')])

        list_dicoms = pipeline.add(
            'list_dicoms',
            ListDir(sort_key=dicom_fname_sort_key),
            inputs={'directory': ('umap', dicom_format)})

        nii2dicom = pipeline.add(
            'nii2dicom',
            Nii2Dicom(
                # extension='Frame',  #  nii2dicom parameter
            ),
            inputs={'reference_dicom': (list_dicoms, 'files'),
                    'in_file': (reorient_niftis, 'reoriented_umaps')},
            iterfield=['in_file'],
            wall_time=20)

        pipeline.add(
            'copy2dir',
            CopyToDir(extension='Frame'),
            inputs={'in_files': (nii2dicom, 'out_file')},
            outputs={'umap_aligned_dicoms': ('out_dir', directory_format)})

        return pipeline
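
    # The iterfield argument above maps a node over each element of a list
    # input, analogous to nipype's MapNode. A self-contained sketch of the
    # same semantics in plain nipype:
    #
    #     from nipype import Function, MapNode
    #
    #     def square(x):
    #         return x ** 2
    #
    #     node = MapNode(Function(input_names=['x'], output_names=['y'],
    #                             function=square),
    #                    name='square', iterfield=['x'])
    #     node.inputs.x = [1, 2, 3]
    #     res = node.run()  # res.outputs.y == [1, 4, 9]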

    def umap_realignment_pipeline(self, **name_maps):

        pipeline = self.new_pipeline(
            name='umap_realignment',
            desc=("Pipeline to align the original umap (if provided)"
                  "to match the head position in each frame and improve the "
                  "static PET image quality."),
            citations=[fsl_cite],
            name_maps=name_maps)

        pipeline.add(
            'umap2ref_alignment',
            UmapAlign2Reference(pct=self.parameter('align_pct')),
            inputs={
                'ute_regmat': ('umap_ref_coreg_matrix', text_matrix_format),
                'ute_qform_mat': ('umap_ref_qform_mat', text_matrix_format),
                'average_mats': ('average_mats', directory_format),
                'umap': ('umap', nifti_gz_format)
            },
            outputs={'umaps_align2ref': ('umaps_align2ref', directory_format)},
            requirements=[fsl_req.v('5.0.9')])

        return pipeline

    def create_moco_series_pipeline(self, **name_maps):
        """This pipeline is probably wrong as we still do not know how to
        import back the new moco series into the scanner. This was just a first
        attempt.
        """

        pipeline = self.new_pipeline(
            name='create_moco_series',
            desc=("Pipeline to generate a moco_series that can be then "
                  "imported back in the scanner and used to correct the"
                  " pet data"),
            citations=[fsl_cite],
            name_maps=name_maps)

        pipeline.add(
            'create_moco_series',
            CreateMocoSeries(moco_template=self.parameter('moco_template')),
            inputs={
                'start_times': ('start_times', text_format),
                'motion_par': ('motion_par', text_format)
            },
            outputs={'moco_series': ('modified_moco', directory_format)})

        return pipeline

    def gather_outputs_pipeline(self, **name_maps):

        pipeline = self.new_pipeline(
            name='gather_motion_detection_outputs',
            desc=("Pipeline to gather together all the outputs from "
                  "the motion detection pipeline."),
            citations=[fsl_cite],
            name_maps=name_maps)

        merge_inputs = pipeline.add(
            'merge_inputs',
            Merge(5),
            inputs={
                'in1': ('mean_displacement_plot', png_format),
                'in2': ('motion_par', text_format),
                'in3': ('correction_factors', text_format),
                'in4': ('severe_motion_detection_report', text_format),
                'in5': ('timestamps', directory_format)
            })

        pipeline.add(
            'copy2dir',
            CopyToDir(),
            inputs={'in_files': (merge_inputs, 'out')},
            outputs={'motion_detection_output': ('out_dir', directory_format)})

        return pipeline
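
    # Merge(5) above is nipype's fan-in utility: it packs its numbered
    # inputs into a single list output. Standalone sketch:
    #
    #     from nipype.interfaces.utility import Merge
    #     merge = Merge(3)
    #     merge.inputs.in1 = 'plot.png'
    #     merge.inputs.in2 = 'motion.txt'
    #     merge.inputs.in3 = 'report.txt'
    #     res = merge.run()
    #     # res.outputs.out == ['plot.png', 'motion.txt', 'report.txt']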

    prepare_pet_pipeline = MultiStudy.translate(
        'pet_mc', 'pet_data_preparation_pipeline')

    pet_header_extraction_pipeline = MultiStudy.translate(
        'pet_mc', 'pet_time_info_extraction_pipeline')
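
    # MultiStudy.translate re-exposes a substudy's pipeline under this
    # class's namespace. A minimal sketch, assuming the commented-out
    # substudy spec above were enabled:
    #
    #     class MyMotionDetection(MotionDetectionMixin,
    #                             metaclass=MultiStudyMetaClass):
    #         add_substudy_specs = [SubStudySpec('pet_mc', PetStudy)]
    #
    # The 'pet_mc' prefix then namespaces the substudy's specs, and
    # translate() forwards prepare_pet_pipeline to the substudy's
    # pet_data_preparation_pipeline.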

    def motion_correction_pipeline(self, **name_maps):

        StructAlignment = 'struct2align' in self.input_names

        pipeline = self.new_pipeline(
            name='pet_mc',
            desc=("Given a folder with reconstructed PET data, this "
                  "pipeline will generate a motion corrected PET"
                  "image using information extracted from the MR-based "
                  "motion detection pipeline"),
            citations=[fsl_cite],
            name_maps=name_maps)

        check_pet = pipeline.add(
            'check_pet_data',
            CheckPetMCInputs(),
            inputs={
                'pet_data': ('pet_data_prepared', directory_format),
                'reference': ('ref_brain', nifti_gz_format)
            },
            requirements=[fsl_req.v('5.0.9'),
                          mrtrix_req.v('3.0rc3')])
        if self.branch('dynamic_pet_mc'):
            pipeline.connect_input('fixed_binning_mats', check_pet,
                                   'motion_mats')
        else:
            pipeline.connect_input('average_mats', check_pet, 'motion_mats')
            pipeline.connect_input('correction_factors', check_pet,
                                   'corr_factors')

        if StructAlignment:
            struct_reg = pipeline.add('ref2structural_reg',
                                      FLIRT(dof=6,
                                            cost_func='normmi',
                                            cost='normmi',
                                            output_type='NIFTI_GZ'),
                                      inputs={
                                          'reference':
                                          ('ref_brain', nifti_gz_format),
                                          'in_file':
                                          ('struct2align', nifti_gz_format)
                                      },
                                      requirements=[fsl_req.v('5.0.9')])

        if self.branch('dynamic_pet_mc'):
            pet_mc = pipeline.add('pet_mc',
                                  PetImageMotionCorrection(),
                                  inputs={
                                      'pet_image': (check_pet, 'pet_images'),
                                      'motion_mat': (check_pet, 'motion_mats'),
                                      'pet2ref_mat': (check_pet, 'pet2ref_mat')
                                  },
                                  requirements=[fsl_req.v('5.0.9')],
                                  iterfield=['pet_image', 'motion_mat'])
        else:
            pet_mc = pipeline.add(
                'pet_mc',
                PetImageMotionCorrection(),
                inputs={
                    'pet_image': (check_pet, 'pet_images'),
                    'motion_mat': (check_pet, 'motion_mats'),
                    'pet2ref_mat': (check_pet, 'pet2ref_mat'),
                    'corr_factor': (check_pet, 'corr_factors')},
                requirements=[fsl_req.v('5.0.9')],
                iterfield=['corr_factor', 'pet_image', 'motion_mat'])

        if StructAlignment:
            pipeline.connect(struct_reg, 'out_matrix_file', pet_mc,
                             'structural2ref_regmat')
            pipeline.connect_input('struct2align', pet_mc, 'structural_image')
        mni_reg = bool(self.parameter('PET2MNI_reg'))

        if self.branch('dynamic_pet_mc'):
            merge_mc = pipeline.add(
                'merge_pet_mc',
                fsl.Merge(dimension='t'),
                inputs={'in_files': (pet_mc, 'pet_mc_image')},
                requirements=[fsl_req.v('5.0.9')])

            merge_no_mc = pipeline.add(
                'merge_pet_no_mc',
                fsl.Merge(dimension='t'),
                inputs={'in_files': (pet_mc, 'pet_no_mc_image')},
                requirements=[fsl_req.v('5.0.9')])
        else:
            static_mc = pipeline.add('static_mc_generation',
                                     StaticPETImageGeneration(),
                                     inputs={
                                         'pet_mc_images':
                                         (pet_mc, 'pet_mc_image'),
                                         'pet_no_mc_images':
                                         (pet_mc, 'pet_no_mc_image')
                                     },
                                     requirements=[fsl_req.v('5.0.9')])

        merge_outputs = pipeline.add(
            'merge_outputs',
            Merge(3),
            inputs={'in1': ('mean_displacement_plot', png_format)})

        if not StructAlignment:
            cropping = pipeline.add(
                'pet_cropping',
                PETFovCropping(x_min=self.parameter('crop_xmin'),
                               x_size=self.parameter('crop_xsize'),
                               y_min=self.parameter('crop_ymin'),
                               y_size=self.parameter('crop_ysize'),
                               z_min=self.parameter('crop_zmin'),
                               z_size=self.parameter('crop_zsize')))
            if self.branch('dynamic_pet_mc'):
                pipeline.connect(merge_mc, 'merged_file', cropping,
                                 'pet_image')
            else:
                pipeline.connect(static_mc, 'static_mc', cropping, 'pet_image')

            cropping_no_mc = pipeline.add(
                'pet_no_mc_cropping',
                PETFovCropping(x_min=self.parameter('crop_xmin'),
                               x_size=self.parameter('crop_xsize'),
                               y_min=self.parameter('crop_ymin'),
                               y_size=self.parameter('crop_ysize'),
                               z_min=self.parameter('crop_zmin'),
                               z_size=self.parameter('crop_zsize')))
            if self.branch('dynamic_pet_mc'):
                pipeline.connect(merge_no_mc, 'merged_file', cropping_no_mc,
                                 'pet_image')
            else:
                pipeline.connect(static_mc, 'static_no_mc', cropping_no_mc,
                                 'pet_image')

            if mni_reg:
                if self.branch('dynamic_pet_mc'):
                    t_mean = pipeline.add(
                        'PET_temporal_mean',
                        ImageMaths(op_string='-Tmean'),
                        inputs={'in_file': (cropping, 'pet_cropped')},
                        requirements=[fsl_req.v('5.0.9')])

                reg_tmean2MNI = pipeline.add(
                    'reg2MNI',
                    AntsRegSyn(num_dimensions=3,
                               transformation='s',
                               out_prefix='reg2MNI',
                               num_threads=4,
                               ref_file=self.parameter('PET_template_MNI')),
                    wall_time=25,
                    requirements=[ants_req.v('2')])

                if self.branch('dynamic_pet_mc'):
                    pipeline.connect(t_mean, 'out_file', reg_tmean2MNI,
                                     'input_file')

                    merge_trans = pipeline.add('merge_transforms',
                                               Merge(2),
                                               inputs={
                                                   'in1': (reg_tmean2MNI,
                                                           'warp_file'),
                                                   'in2':
                                                   (reg_tmean2MNI, 'regmat')
                                               },
                                               wall_time=1)

                    apply_trans = pipeline.add(
                        'apply_trans',
                        ApplyTransforms(
                            reference_image=self.parameter('PET_template_MNI'),
                            interpolation='Linear',
                            input_image_type=3),
                        inputs={
                            'input_image': (cropping, 'pet_cropped'),
                            'transforms': (merge_trans, 'out')
                        },
                        wall_time=7,
                        mem_gb=24,
                        requirements=[ants_req.v('2')])
                    pipeline.connect(apply_trans, 'output_image',
                                     merge_outputs, 'in2')
                else:
                    pipeline.connect(cropping, 'pet_cropped', reg_tmean2MNI,
                                     'input_file')
                    pipeline.connect(reg_tmean2MNI, 'reg_file', merge_outputs,
                                     'in2')
            else:
                pipeline.connect(cropping, 'pet_cropped', merge_outputs, 'in2')
            pipeline.connect(cropping_no_mc, 'pet_cropped', merge_outputs,
                             'in3')
        else:
            if self.branch('dynamic_pet_mc'):
                pipeline.connect(merge_mc, 'merged_file', merge_outputs, 'in2')
                pipeline.connect(merge_no_mc, 'merged_file', merge_outputs,
                                 'in3')
            else:
                pipeline.connect(static_mc, 'static_mc', merge_outputs, 'in2')
                pipeline.connect(static_mc, 'static_no_mc', merge_outputs,
                                 'in3')


#         mcflirt = pipeline.add('mcflirt', MCFLIRT())
#                 'in_file': (merge_mc_ps, 'merged_file'),
#                 cost='normmi',

        copy2dir = pipeline.add('copy2dir',
                                CopyToDir(),
                                inputs={'in_files': (merge_outputs, 'out')})
        if self.branch('dynamic_pet_mc'):
            pipeline.connect_output('dynamic_motion_correction_results',
                                    copy2dir, 'out_dir')
        else:
            pipeline.connect_output('static_motion_correction_results',
                                    copy2dir, 'out_dir')
        return pipeline
Ejemplo n.º 25
0
class MriStudy(Study, metaclass=StudyMetaClass):

    add_data_specs = [
        InputFilesetSpec('magnitude', STD_IMAGE_FORMATS,
                         desc=("Typically the primary scan acquired from "
                               "the scanner for the given contrast")),
        InputFilesetSpec(
            'coreg_ref', STD_IMAGE_FORMATS,
            desc=("A reference scan to coregister the primary scan to. Should "
                  "not be brain extracted"),
            optional=True),
        InputFilesetSpec(
            'coreg_ref_brain', STD_IMAGE_FORMATS,
            desc=("A brain-extracted reference scan to coregister a brain-"
                  "extracted scan to. Note that the output of the "
                  "registration brain_coreg can also be derived by brain "
                  "extracting the output of coregistration performed "
                  "before brain extraction if 'coreg_ref' is provided"),
            optional=True),
        InputFilesetSpec(
            'channels', (multi_nifti_gz_format, zip_format),
            optional=True, desc=("Reconstructed complex image for each "
                                 "coil without standardisation.")),
        InputFilesetSpec('header_image', dicom_format, desc=(
            "A dataset that contains correct the header information for the "
            "acquired image. Used to copy geometry over preprocessed "
            "channels"), optional=True),
        FilesetSpec('mag_preproc', nifti_gz_format, 'prepare_pipeline',
                    desc=("Magnitude after basic preprocessing, such as "
                          "realigning image axis to a standard rotation")),
        FilesetSpec('mag_channels', multi_nifti_gz_format,
                    'preprocess_channels_pipeline'),
        FilesetSpec('phase_channels', multi_nifti_gz_format,
                    'preprocess_channels_pipeline'),
        FilesetSpec('brain', nifti_gz_format, 'brain_extraction_pipeline',
                    desc="The brain masked image"),
        FilesetSpec('brain_mask', nifti_gz_format, 'brain_extraction_pipeline',
                    desc="Mask of the brain"),
        FilesetSpec('mag_coreg', nifti_gz_format, 'coreg_pipeline',
                    desc="Head image coregistered to 'coreg_ref'"),
        FilesetSpec('brain_coreg', nifti_gz_format,
                    'brain_coreg_pipeline',
                    desc=("Either brain-extracted image coregistered to "
                          "'coreg_ref_brain' or a brain extraction of a "
                          "coregistered (incl. skull) image")),
        FilesetSpec('brain_mask_coreg', nifti_gz_format,
                    'brain_coreg_pipeline',
                    desc=("Either brain-extracted image coregistered to "
                          "'coreg_ref_brain' or a brain extraction of a "
                          "coregistered (incl. skull) image")),
        FilesetSpec('coreg_ants_mat', text_matrix_format,
                    'coreg_ants_mat_pipeline'),
        FilesetSpec('coreg_fsl_mat', text_matrix_format,
                    'coreg_fsl_mat_pipeline'),
        FilesetSpec('mag_coreg_to_tmpl', nifti_gz_format,
                    'coreg_to_tmpl_pipeline'),
        FilesetSpec('coreg_to_tmpl_fsl_coeff', nifti_gz_format,
                    'coreg_to_tmpl_pipeline'),
        FilesetSpec('coreg_to_tmpl_fsl_report', gif_format,
                    'coreg_to_tmpl_pipeline'),
        FilesetSpec('coreg_to_tmpl_ants_mat', text_matrix_format,
                    'coreg_to_tmpl_pipeline'),
        FilesetSpec('coreg_to_tmpl_ants_warp', nifti_gz_format,
                    'coreg_to_tmpl_pipeline'),
        FilesetSpec('motion_mats', motion_mats_format, 'motion_mat_pipeline'),
        FilesetSpec('qformed', nifti_gz_format, 'qform_transform_pipeline'),
        FilesetSpec('qform_mat', text_matrix_format,
                    'qform_transform_pipeline'),
        FieldSpec('tr', float, 'header_extraction_pipeline'),
        FieldSpec('echo_times', float, 'header_extraction_pipeline',
                  array=True),
        FieldSpec('voxel_sizes', float, 'header_extraction_pipeline',
                  array=True),
        FieldSpec('main_field_orient', float, 'header_extraction_pipeline',
                  array=True),
        FieldSpec('main_field_strength', float, 'header_extraction_pipeline'),
        FieldSpec('start_time', float, 'header_extraction_pipeline'),
        FieldSpec('real_duration', float, 'header_extraction_pipeline'),
        FieldSpec('total_duration', float, 'header_extraction_pipeline'),
        FieldSpec('ped', str, 'header_extraction_pipeline'),
        FieldSpec('pe_angle', float, 'header_extraction_pipeline'),
        # Templates
        InputFilesetSpec('template', STD_IMAGE_FORMATS, frequency='per_study',
                         default=FslReferenceData(
                             'MNI152_T1',
                             format=nifti_gz_format,
                             resolution='mni_template_resolution')),
        InputFilesetSpec('template_brain', STD_IMAGE_FORMATS,
                         frequency='per_study',
                         default=FslReferenceData(
                             'MNI152_T1',
                             format=nifti_gz_format,
                             resolution='mni_template_resolution',
                             dataset='brain')),
        InputFilesetSpec('template_mask', STD_IMAGE_FORMATS,
                         frequency='per_study',
                         default=FslReferenceData(
                             'MNI152_T1',
                             format=nifti_gz_format,
                             resolution='mni_template_resolution',
                             dataset='brain_mask'))]

    add_param_specs = [
        SwitchSpec('resample_coreg_ref', False,
                   desc=("Whether to resample the coregistration reference "
                         "image to the resolution of the moving image")),
        SwitchSpec('reorient_to_std', True),
        ParamSpec('force_channel_flip', None, dtype=str, array=True,
                  desc=("Forcibly flip channel inputs during preprocess "
                        "channels to correct issues with channel recon. "
                        "The inputs are passed directly through to FSL's "
                        "swapdims (see fsl.SwapDimensions interface)")),
        SwitchSpec('bet_robust', True),
        ParamSpec('bet_f_threshold', 0.5),
        SwitchSpec('bet_reduce_bias', False,
                   desc="Only used if not 'bet_robust'"),
        ParamSpec('bet_g_threshold', 0.0),
        SwitchSpec('bet_method', 'fsl_bet', ('fsl_bet', 'optibet')),
        SwitchSpec('optibet_gen_report', False),
        SwitchSpec('coreg_to_tmpl_method', 'ants', ('fnirt', 'ants')),
        ParamSpec('mni_template_resolution', None, choices=(0.5, 1, 2),
                  dtype=float),
        ParamSpec('fnirt_intensity_model', 'global_non_linear_with_bias'),
        ParamSpec('fnirt_subsampling', [4, 4, 2, 2, 1, 1]),
        ParamSpec('reoriented_dims', ('RL', 'AP', 'IS')),
        ParamSpec('resampled_resolution', None, dtype=list),
        SwitchSpec('coreg_method', 'ants', ('ants', 'flirt', 'spm'),
                   desc="The tool to use for linear registration"),
        ParamSpec('flirt_degrees_of_freedom', 6, desc=(
            "Number of degrees of freedom used in the registration. "
            "Default is 6 -> affine transformation.")),
        ParamSpec('flirt_cost_func', 'normmi', desc=(
            "Cost function used for the registration. Can be one of "
            "'mutualinfo', 'corratio', 'normcorr', 'normmi', 'leastsq',"
            " 'labeldiff', 'bbr'")),
        ParamSpec('flirt_qsform', False, desc=(
            "Whether to use the QS form supplied in the input image "
            "header (the image coordinates of the FOV supplied by the "
            "scanner")),
        ParamSpec(
            'channel_fname_regex',
            r'.*_(?P<channel>\d+)_(?P<echo>\d+)_(?P<axis>[A-Z]+)\.nii\.gz',
            desc=("The regular expression to extract channel, echo and complex"
                  " axis from the filenames of the coils channel images")),
        ParamSpec(
            'channel_real_label', 'REAL',
            desc=("The name of the real axis extracted from the channel "
                  "filename")),
        ParamSpec(
            'channel_imag_label', 'IMAGINARY',
            desc=("The name of the real axis extracted from the channel "
                  "filename"))]

    @property
    def mni_template_resolution(self):
        if self.parameter('mni_template_resolution') is not None:
            res = self.parameter('mni_template_resolution')
        else:
            raise ArcanaMissingDataException(
                "Automatic detection of dataset resolution is not implemented "
                "yet, please specify resolution of default MNI templates "
                "manually via 'mni_template_resolution' parameter")
        return res

    @property
    def is_coregistered(self):
        return self.provided('coreg_ref') or self.provided('coreg_ref_brain')

    @property
    def header_image_spec_name(self):
        if self.provided('header_image'):
            hdr_name = 'header_image'
        else:
            hdr_name = 'magnitude'
        return hdr_name

    @property
    def brain_spec_name(self):
        """
        The name of the brain extracted image after registration has been
        applied if registration is specified by supplying 'coreg_ref' or
        'coreg_ref_brain' optional inputs.
        """
        if self.is_coregistered:
            name = 'brain_coreg'
        else:
            name = 'brain'
        return name

    @property
    def brain_mask_spec_name(self):
        if self.is_coregistered:
            brain_mask = 'brain_mask_coreg'
        else:
            brain_mask = 'brain_mask'
        return brain_mask

    def preprocess_channels_pipeline(self, **name_maps):
        pipeline = self.new_pipeline(
            'preprocess_channels',
            name_maps=name_maps,
            desc=("Convert channel signals in complex coords to polar coords "
                  "and combine"))

        if (self.provided('header_image') or
                self.branch('reorient_to_std') or
                self.parameter('force_channel_flip') is not None):
            # Read channel files reorient them into standard space and then
            # write back to directory
            list_channels = pipeline.add(
                'list_channels',
                ListDir(),
                inputs={
                    'directory': ('channels', multi_nifti_gz_format)})

            if self.parameter('force_channel_flip') is not None:
                force_flip = pipeline.add(
                    'flip_dims',
                    fsl.SwapDimensions(
                        new_dims=tuple(self.parameter('force_channel_flip'))),
                    inputs={
                        'in_file': (list_channels, 'files')},
                    iterfield=['in_file'])
                geom_dest_file = (force_flip, 'out_file')
            else:
                geom_dest_file = (list_channels, 'files')

            if self.provided('header_image'):
                # If header image is provided stomp its geometry over the
                # acquired channels
                copy_geom = pipeline.add(
                    'qsm_copy_geometry',
                    fsl.CopyGeom(
                        output_type='NIFTI_GZ'),
                    inputs={
                        'in_file': ('header_image', nifti_gz_format),
                        'dest_file': geom_dest_file},
                    iterfield=(['dest_file']),
                    requirements=[fsl_req.v('5.0.8')])
                reorient_in_file = (copy_geom, 'out_file')
            else:
                reorient_in_file = geom_dest_file

            if self.branch('reorient_to_std'):
                reorient = pipeline.add(
                    'reorient_channel',
                    fsl.Reorient2Std(
                        output_type='NIFTI_GZ'),
                    inputs={
                        'in_file': reorient_in_file},
                    iterfield=['in_file'],
                    requirements=[fsl_req.v('5.0.8')])
                copy_to_dir_in_files = (reorient, 'out_file')
            else:
                copy_to_dir_in_files = reorient_in_file

            copy_to_dir = pipeline.add(
                'copy_to_dir',
                CopyToDir(),
                inputs={
                    'in_files': copy_to_dir_in_files,
                    'file_names': (list_channels, 'files')})
            to_polar_in_dir = (copy_to_dir, 'out_dir')
        else:
            to_polar_in_dir = ('channels', multi_nifti_gz_format)

        pipeline.add(
            'to_polar',
            ToPolarCoords(
                in_fname_re=self.parameter('channel_fname_regex'),
                real_label=self.parameter('channel_real_label'),
                imaginary_label=self.parameter('channel_imag_label')),
            inputs={
                'in_dir': to_polar_in_dir},
            outputs={
                'mag_channels': ('magnitudes_dir', multi_nifti_gz_format),
                'phase_channels': ('phases_dir', multi_nifti_gz_format)})

        return pipeline
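
    # ToPolarCoords converts per-channel real/imaginary volumes into
    # magnitude and phase. The underlying maths, sketched with numpy on
    # stand-in arrays:
    #
    #     import numpy as np
    #     real, imag = np.random.randn(2, 64, 64, 64)
    #     magnitude = np.sqrt(real ** 2 + imag ** 2)  # np.abs(real + 1j*imag)
    #     phase = np.arctan2(imag, real)              # np.angle(real + 1j*imag)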

    def coreg_pipeline(self, **name_maps):
        if self.branch('coreg_method', 'flirt'):
            pipeline = self._flirt_linear_coreg_pipeline(**name_maps)
        elif self.branch('coreg_method', 'ants'):
            pipeline = self._ants_linear_coreg_pipeline(**name_maps)
        elif self.branch('coreg_method', 'spm'):
            pipeline = self._spm_linear_coreg_pipeline(**name_maps)
        else:
            self.unhandled_branch('coreg_method')
        if not self.provided(pipeline.map_input('coreg_ref')):
            raise ArcanaOutputNotProducedException(
                "Cannot co-register {} as reference image "
                "'{}' has not been provided".format(
                    pipeline.map_input('coreg_ref')))
        return pipeline

    def brain_extraction_pipeline(self, **name_maps):
        if self.branch('bet_method', 'fsl_bet'):
            pipeline = self._bet_brain_extraction_pipeline(**name_maps)
        elif self.branch('bet_method', 'optibet'):
            pipeline = self._optiBET_brain_extraction_pipeline(**name_maps)
        else:
            self.unhandled_branch('bet_method')
        return pipeline

    def brain_coreg_pipeline(self, **name_maps):
        """
        Coregistered + brain-extracted images can be derived in two ways. If
        an explicit brain-extracted reference is provided to
        'coreg_ref_brain' then the brain-extracted image is coregistered
        directly against it. Alternatively, if only a skull-included
        reference is provided, the registration is performed with skulls
        included and brain extraction is applied afterwards.
        """
        if self.provided('coreg_ref_brain'):
            # If a reference brain extracted image is provided we coregister
            # the brain extracted image to that
            pipeline = self.coreg_pipeline(
                name='brain_coreg',
                name_maps=dict(
                    input_map={
                        'mag_preproc': 'brain',
                        'coreg_ref': 'coreg_ref_brain'},
                    output_map={
                        'mag_coreg': 'brain_coreg'},
                    name_maps=name_maps))

            # Apply coregistration transform to brain mask
            if self.branch('coreg_method', 'flirt'):
                pipeline.add(
                    'mask_transform',
                    ApplyXFM(
                        output_type='NIFTI_GZ',
                        apply_xfm=True),
                    inputs={
                        'in_matrix_file': (pipeline.node('flirt'),
                                           'out_matrix_file'),
                        'in_file': ('brain_mask', nifti_gz_format),
                        'reference': ('coreg_ref_brain', nifti_gz_format)},
                    outputs={
                        'brain_mask_coreg': ('out_file', nifti_gz_format)},
                    requirements=[fsl_req.v('5.0.10')],
                    wall_time=10)

            elif self.branch('coreg_method', 'ants'):
                # Apply the transforms estimated during coregistration to
                # the brain mask with ANTs' ApplyTransforms (NB: 'ants_reg'
                # and 'forward_transforms' refer to the ants.Registration-
                # based implementation of the linear coreg pipeline)
                pipeline.add(
                    'mask_transform',
                    ants.resampling.ApplyTransforms(
                        interpolation='Linear',
                        input_image_type=3,
                        invert_transform_flags=[True, True, False]),
                    inputs={
                        'input_image': ('brain_mask', nifti_gz_format),
                        'reference_image': ('coreg_ref_brain',
                                            nifti_gz_format),
                        'transforms': (pipeline.node('ants_reg'),
                                       'forward_transforms')},
                    outputs={
                        'brain_mask_coreg': ('output_image',
                                             nifti_gz_format)},
                    requirements=[ants_req.v('1.9')], mem_gb=16,
                    wall_time=30)
            else:
                self.unhandled_branch('coreg_method')

        elif self.provided('coreg_ref'):
            # If coreg_ref is provided then we co-register the non-brain
            # extracted images and then brain extract the co-registered image
            pipeline = self.brain_extraction_pipeline(
                name='bet_coreg',
                input_map={'mag_preproc': 'mag_coreg'},
                output_map={'brain': 'brain_coreg',
                            'brain_mask': 'brain_mask_coreg'},
                name_maps=name_maps)
        else:
            raise BananaUsageError(
                "Either 'coreg_ref' or 'coreg_ref_brain' needs to be provided "
                "in order to derive brain_coreg or brain_mask_coreg")
        return pipeline

    def _coreg_mat_pipeline(self, **name_maps):
        if self.provided('coreg_ref_brain'):
            pipeline = self.brain_coreg_pipeline(**name_maps)
        elif self.provided('coreg_ref'):
            pipeline = self.coreg_pipeline(**name_maps)
        else:
            raise ArcanaOutputNotProducedException(
                "'coregistration matrices can only be derived if 'coreg_ref' "
                "or 'coreg_ref_brain' is provided to {}".format(self))
        return pipeline

    def coreg_ants_mat_pipeline(self, **name_maps):
        if self.branch('coreg_method', 'ants'):
            pipeline = self._coreg_mat_pipeline(**name_maps)
        else:
            # Run the coreg_mat pipeline only to generate the ANTs transform,
            # mapping the typical outputs to None so they don't override
            # the other settings
            pipeline = self._coreg_mat_pipeline(
                output_map={
                    'mag_preproc': None,
                    'brain_coreg': None,
                    'brain_mask_coreg': None},
                name_maps=name_maps)
        return pipeline

    def coreg_fsl_mat_pipeline(self, **name_maps):
        if self.branch('coreg_method', 'flirt'):
            pipeline = self._coreg_mat_pipeline(**name_maps)
        elif self.branch('coreg_method', 'ants'):
            # Convert ANTS transform to FSL transform
            pipeline = self.new_pipeline(
                name='convert_ants_to_fsl_coreg_mat',
                name_maps=name_maps)

            if self.provided('coreg_ref'):
                source = 'mag_preproc'
                ref = 'coreg_ref'
            elif self.provided('coreg_ref_brain'):
                source = 'brain'
                ref = 'coreg_ref_brain'
            else:
                raise BananaUsageError(
                    "Either 'coreg_ref' or 'coreg_ref_brain' needs to be "
                    "provided in order to derive brain_coreg or brain_coreg_"
                    "mask")

            pipeline.add(
                'transform_conv',
                ANTs2FSLMatrixConversion(
                    ras2fsl=True),
                inputs={
                    'itk_file': ('coreg_ants_mat', text_matrix_format),
                    'source_file': (source, nifti_gz_format),
                    'reference_file': (ref, nifti_gz_format)},
                outputs={
                    'coreg_fsl_mat': ('fsl_matrix', text_matrix_format)},
                requirements=[c3d_req.v('1.0')])
        else:
            self.unhandled_branch('coreg_method')

        return pipeline

    def coreg_to_tmpl_pipeline(self, **name_maps):
        if self.branch('coreg_to_tmpl_method', 'fnirt'):
            pipeline = self._fnirt_to_tmpl_pipeline(**name_maps)
        elif self.branch('coreg_to_tmpl_method', 'ants'):
            pipeline = self._ants_to_tmpl_pipeline(**name_maps)
        else:
            self.unhandled_branch('coreg_to_tmpl_method')
        return pipeline

    def _flirt_linear_coreg_pipeline(self, **name_maps):
        """
        Registers an MR scan to a reference MR scan using FSL's FLIRT command
        """

        pipeline = self.new_pipeline(
            name='linear_coreg',
            name_maps=name_maps,
            desc="Registers a MR scan against a reference image using FLIRT",
            citations=[fsl_cite])

        pipeline.add(
            'flirt',
            FLIRT(dof=self.parameter('flirt_degrees_of_freedom'),
                  cost=self.parameter('flirt_cost_func'),
                  cost_func=self.parameter('flirt_cost_func'),
                  output_type='NIFTI_GZ'),
            inputs={
                'in_file': ('mag_preproc', nifti_gz_format),
                'reference': ('coreg_ref', nifti_gz_format)},
            outputs={
                'mag_coreg': ('out_file', nifti_gz_format),
                'coreg_fsl_mat': ('out_matrix_file', text_matrix_format)},
            requirements=[fsl_req.v('5.0.8')],
            wall_time=5)

        return pipeline

    def _ants_linear_coreg_pipeline(self, **name_maps):
        """
        Registers an MR scan to a reference MR scan using ANTs' rigid
        registration (via the AntsRegSyn wrapper)
        """

        pipeline = self.new_pipeline(
            name='linear_coreg',
            name_maps=name_maps,
            desc="Registers a MR scan against a reference image using ANTs",
            citations=[ants_cite])

        pipeline.add(
            'ANTs_linear_Reg',
            AntsRegSyn(
                num_dimensions=3,
                transformation='r'),
            inputs={
                'ref_file': ('coreg_ref', nifti_gz_format),
                'input_file': ('mag_preproc', nifti_gz_format)},
            outputs={
                'mag_coreg': ('reg_file', nifti_gz_format),
                'coreg_ants_mat': ('regmat', text_matrix_format)},
            wall_time=10,
            requirements=[ants_req.v('2.0')])


#         ants_reg = pipeline.add(
#             'ants_reg',
#             ants.Registration(
#                 dimension=3,
#                 collapse_output_transforms=True,
#                 float=False,
#                 interpolation='Linear',
#                 use_histogram_matching=False,
#                 winsorize_upper_quantile=0.995,
#                 winsorize_lower_quantile=0.005,
#                 verbose=True,
#                 transforms=['Rigid'],
#                 transform_parameters=[(0.1,)],
#                 metric=['MI'],
#                 metric_weight=[1],
#                 radius_or_number_of_bins=[32],
#                 sampling_strategy=['Regular'],
#                 sampling_percentage=[0.25],
#                 number_of_iterations=[[1000, 500, 250, 100]],
#                 convergence_threshold=[1e-6],
#                 convergence_window_size=[10],
#                 shrink_factors=[[8, 4, 2, 1]],
#                 smoothing_sigmas=[[3, 2, 1, 0]],
#                 output_warped_image=True),
#             inputs={
#                 'fixed_image': ('coreg_ref', nifti_gz_format),
#                 'moving_image': ('mag_preproc', nifti_gz_format)},
#             outputs={
#                 'mag_coreg': ('warped_image', nifti_gz_format)},
#             wall_time=10,
#             requirements=[ants_req.v('2.0')])
#
#         pipeline.add(
#             'select',
#             SelectOne(
#                 index=0),
#             inputs={
#                 'inlist': (ants_reg, 'forward_transforms')},
#             outputs={
#                 'coreg_ants_mat': ('out', text_matrix_format)})

        return pipeline

    def _spm_linear_coreg_pipeline(self, **name_maps):  # @UnusedVariable
        """
        Coregisters T2 image to T1 image using SPM's "Register" method.

        NB: Default values come from the W2MHS toolbox
        """
        pipeline = self.new_pipeline(
            'linear_coreg',
            name_maps=name_maps,
            desc="Coregister T2-weighted images to T1",
            citations=[spm_cite])

        pipeline.add(
            'mag_coreg',
            Coregister(
                jobtype='estwrite',
                cost_function='nmi',
                separation=[4, 2],
                tolerance=[0.02, 0.02, 0.02, 0.001, 0.001, 0.001, 0.01, 0.01,
                           0.01, 0.001, 0.001, 0.001],
                fwhm=[7, 7],
                write_interp=4,
                write_wrap=[0, 0, 0],
                write_mask=False,
                out_prefix='r'),
            inputs={
                'target': ('coreg_ref', nifti_format),
                'source': ('mag_preproc', nifti_format)},
            outputs={
                'mag_coreg': ('coregistered_source', nifti_format)},
            requirements=[spm_req.v(12)],
            wall_time=30)
        return pipeline

    def qform_transform_pipeline(self, **name_maps):
        pipeline = self.new_pipeline(
            name='qform_transform',
            name_maps=name_maps,
            desc="Registers a MR scan against a reference image",
            citations=[fsl_cite])

        if self.provided('coreg_ref'):
            in_file = 'mag_preproc'
            reference = 'coreg_ref'
        elif self.provided('coreg_ref_brain'):
            in_file = 'brain'
            reference = 'coreg_ref_brain'
        else:
            raise BananaUsageError(
                "Either 'coreg_ref' or 'coreg_ref_brain' needs to be "
                "provided to the study in order to run qform_transform")

        pipeline.add(
            'flirt',
            FLIRT(
                uses_qform=True,
                apply_xfm=True,
                output_type='NIFTI_GZ'),
            inputs={
                'in_file': (in_file, nifti_gz_format),
                'reference': (reference, nifti_gz_format)},
            outputs={
                'qformed': ('out_file', nifti_gz_format),
                'qform_mat': ('out_matrix_file', text_matrix_format)},
            requirements=[fsl_req.v('5.0.8')],
            wall_time=5)

        return pipeline
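
    # For reference, a minimal standalone sketch of the same qform-based
    # registration outside the pipeline framework (nipype + FSL assumed;
    # the file names are illustrative only):
    #
    #     from nipype.interfaces.fsl import FLIRT
    #
    #     flt = FLIRT(in_file='scan.nii.gz',
    #                 reference='ref.nii.gz',
    #                 uses_qform=True,   # initialise from the qform headers
    #                 apply_xfm=True,    # apply rather than re-estimate
    #                 output_type='NIFTI_GZ')
    #     print(flt.cmdline)  # inspect the generated flirt call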

    def _bet_brain_extraction_pipeline(self, **name_maps):
        """
        Generates a whole brain mask using FSL's BET command.
        """
        pipeline = self.new_pipeline(
            name='brain_extraction',
            name_maps=name_maps,
            desc="Generate brain mask from mr_scan",
            citations=[fsl_cite, bet_cite, bet2_cite])
        # Create mask node
        bet = pipeline.add(
            "bet",
            fsl.BET(
                mask=True,
                output_type='NIFTI_GZ',
                frac=self.parameter('bet_f_threshold'),
                vertical_gradient=self.parameter('bet_g_threshold')),
            inputs={
                'in_file': ('mag_preproc', nifti_gz_format)},
            outputs={
                'brain': ('out_file', nifti_gz_format),
                'brain_mask': ('mask_file', nifti_gz_format)},
            requirements=[fsl_req.v('5.0.9')])
        # Set either robust or reduce bias
        if self.branch('bet_robust'):
            bet.inputs.robust = True
        else:
            bet.inputs.reduce_bias = self.parameter('bet_reduce_bias')
        return pipeline

    def _optiBET_brain_extraction_pipeline(self, **name_maps):
        """
        Generates a whole brain mask using a modified optiBET approach.
        """
        pipeline = self.new_pipeline(
            name='brain_extraction',
            name_maps=name_maps,
            desc=("Modified implementation of optiBET.sh"),
            citations=[fsl_cite])

        mni_reg = pipeline.add(
            'T1_reg',
            AntsRegSyn(
                num_dimensions=3,
                transformation='s',
                out_prefix='T12MNI',
                num_threads=4),
            inputs={
                'ref_file': ('template', nifti_gz_format),
                'input_file': ('mag_preproc', nifti_gz_format)},
            wall_time=25,
            requirements=[ants_req.v('2.0')])

        merge_trans = pipeline.add(
            'merge_transforms',
            Merge(2),
            inputs={
                'in1': (mni_reg, 'inv_warp'),
                'in2': (mni_reg, 'regmat')},
            wall_time=1)
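
        # The invert flags below pair one-to-one with the transforms merged
        # above: the inverse warp (in1) is applied as-is, while the affine
        # matrix (in2) is inverted, taking the template mask back to
        # native space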

        trans_flags = pipeline.add(
            'trans_flags',
            Merge(2,
                  in1=False,
                  in2=True),
            wall_time=1)

        apply_trans = pipeline.add(
            'ApplyTransform',
            ApplyTransforms(
                interpolation='NearestNeighbor',
                input_image_type=3),
            inputs={
                'input_image': ('template_mask', nifti_gz_format),
                'reference_image': ('mag_preproc', nifti_gz_format),
                'transforms': (merge_trans, 'out'),
                'invert_transform_flags': (trans_flags, 'out')},
            wall_time=7,
            mem_gb=24,
            requirements=[ants_req.v('2.0')])

        maths1 = pipeline.add(
            'binarize',
            fsl.ImageMaths(
                suffix='_optiBET_brain_mask',
                op_string='-bin',
                output_type='NIFTI_GZ'),
            inputs={
                'in_file': (apply_trans, 'output_image')},
            outputs={
                'brain_mask': ('out_file', nifti_gz_format)},
            wall_time=5,
            requirements=[fsl_req.v('5.0.8')])

        maths2 = pipeline.add(
            'mask',
            fsl.ImageMaths(
                suffix='_optiBET_brain',
                op_string='-mas',
                output_type='NIFTI_GZ'),
            inputs={
                'in_file': ('mag_preproc', nifti_gz_format),
                'in_file2': (maths1, 'out_file')},
            outputs={
                'brain': ('out_file', nifti_gz_format)},
            wall_time=5,
            requirements=[fsl_req.v('5.0.8')])

        if self.branch('optibet_gen_report'):
            pipeline.add(
                'slices',
                FSLSlices(
                    outname='optiBET_report',
                    output_type='NIFTI_GZ'),
                wall_time=5,
                inputs={
                    'im1': ('mag_preproc', nifti_gz_format),
                    'im2': (maths2, 'out_file')},
                outputs={
                    'optiBET_report': ('report', gif_format)},
                requirements=[fsl_req.v('5.0.8')])

        return pipeline

    def _fnirt_to_tmpl_pipeline(self, **name_maps):  # @UnusedVariable @IgnorePep8
        """
        Registers an MR scan to a reference MR scan using FSL's nonlinear
        FNIRT command

        Parameters
        ----------
        template : str
            Which template to use; can be one of 'mni_nl6'
        """
        pipeline = self.new_pipeline(
            name='mag_coreg_to_tmpl',
            name_maps=name_maps,
            desc=("Nonlinearly registers a MR scan to a standard space,"
                  "e.g. MNI-space"),
            citations=[fsl_cite])

        # Basic reorientation to standard MNI space
        reorient = pipeline.add(
            'reorient',
            Reorient2Std(
                output_type='NIFTI_GZ'),
            inputs={
                'in_file': ('mag_preproc', nifti_gz_format)},
            requirements=[fsl_req.v('5.0.8')])

        reorient_mask = pipeline.add(
            'reorient_mask',
            Reorient2Std(
                output_type='NIFTI_GZ'),
            inputs={
                'in_file': ('brain_mask', nifti_gz_format)},
            requirements=[fsl_req.v('5.0.8')])

        reorient_brain = pipeline.add(
            'reorient_brain',
            Reorient2Std(
                output_type='NIFTI_GZ'),
            inputs={
                'in_file': ('brain', nifti_gz_format)},
            requirements=[fsl_req.v('5.0.8')])

        # Affine transformation to MNI space
        flirt = pipeline.add(
            'flirt',
            interface=FLIRT(
                dof=12,
                output_type='NIFTI_GZ'),
            inputs={
                'reference': ('template_brain', nifti_gz_format),
                'in_file': (reorient_brain, 'out_file')},
            requirements=[fsl_req.v('5.0.8')],
            wall_time=5)

        # Apply mask if corresponding subsampling scheme is 1
        # (i.e. 1-to-1 resolution) otherwise don't.
        apply_mask = [int(s == 1)
                      for s in self.parameter('fnirt_subsampling')]
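        # e.g. a subsampling scheme of [4, 2, 1, 1] gives apply_mask
        # flags of [0, 0, 1, 1]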
        # Nonlinear transformation to MNI space
        pipeline.add(
            'fnirt',
            interface=FNIRT(
                output_type='NIFTI_GZ',
                intensity_mapping_model=(
                    self.parameter('fnirt_intensity_model')
                    if self.parameter('fnirt_intensity_model') is not None else
                    'none'),
                subsampling_scheme=self.parameter('fnirt_subsampling'),
                fieldcoeff_file=True,
                in_fwhm=[8, 6, 5, 4, 3, 2],  # NB: [8, 6, 5, 4.5, 3, 2] threw an error due to the float value @IgnorePep8
                ref_fwhm=[8, 6, 5, 4, 2, 0],
                regularization_lambda=[300, 150, 100, 50, 40, 30],
                apply_intensity_mapping=[1, 1, 1, 1, 1, 0],
                max_nonlin_iter=[5, 5, 5, 5, 5, 10],
                apply_inmask=apply_mask,
                apply_refmask=apply_mask),
            inputs={
                'ref_file': ('template', nifti_gz_format),
                'refmask': ('template_mask', nifti_gz_format),
                'in_file': (reorient, 'out_file'),
                'inmask_file': (reorient_mask, 'out_file'),
                'affine_file': (flirt, 'out_matrix_file')},
            outputs={
                'mag_coreg_to_tmpl': ('warped_file', nifti_gz_format),
                'coreg_to_tmpl_fsl_coeff': ('fieldcoeff_file',
                                             nifti_gz_format)},
            requirements=[fsl_req.v('5.0.8')],
            wall_time=60)
        # Set registration parameters
        # TODO: Need to work out which parameters to use
        return pipeline

    def _ants_to_tmpl_pipeline(self, **name_maps):

        pipeline = self.new_pipeline(
            name='mag_coreg_to_tmpl',
            name_maps=name_maps,
            desc=("Nonlinearly registers a MR scan to a standard space,"
                  "e.g. MNI-space"),
            citations=[fsl_cite])

        pipeline.add(
            'Struct2MNI_reg',
            AntsRegSyn(
                num_dimensions=3,
                transformation='s',
                num_threads=4),
            inputs={
                'input_file': (self.brain_spec_name, nifti_gz_format),
                'ref_file': ('template_brain', nifti_gz_format)},
            outputs={
                'mag_coreg_to_tmpl': ('reg_file', nifti_gz_format),
                'coreg_to_tmpl_ants_mat': ('regmat', text_matrix_format),
                'coreg_to_tmpl_ants_warp': ('warp_file', nifti_gz_format)},
            wall_time=25,
            requirements=[ants_req.v('2.0')])

#         ants_reg = pipeline.add(
#             'ants_reg',
#             ants.Registration(
#                 dimension=3,
#                 collapse_output_transforms=True,
#                 float=False,
#                 interpolation='Linear',
#                 use_histogram_matching=False,
#                 winsorize_upper_quantile=0.995,
#                 winsorize_lower_quantile=0.005,
#                 verbose=True,
#                 transforms=['Rigid', 'Affine', 'SyN'],
#                 transform_parameters=[(0.1,), (0.1,), (0.1, 3, 0)],
#                 metric=['MI', 'MI', 'CC'],
#                 metric_weight=[1, 1, 1],
#                 radius_or_number_of_bins=[32, 32, 32],
#                 sampling_strategy=['Regular', 'Regular', 'None'],
#                 sampling_percentage=[0.25, 0.25, None],
#                 number_of_iterations=[[1000, 500, 250, 100],
#                                       [1000, 500, 250, 100],
#                                       [100, 70, 50, 20]],
#                 convergence_threshold=[1e-6, 1e-6, 1e-6],
#                 convergence_window_size=[10, 10, 10],
#                 shrink_factors=[[8, 4, 2, 1],
#                                 [8, 4, 2, 1],
#                                 [8, 4, 2, 1]],
#                 smoothing_sigmas=[[3, 2, 1, 0],
#                                   [3, 2, 1, 0],
#                                   [3, 2, 1, 0]],
#                 output_warped_image=True),
#             inputs={
#                 'fixed_image': ('template_brain', nifti_gz_format),
#                 'moving_image': (self.brain_spec_name, nifti_gz_format)},
#             outputs={
#                 'mag_coreg_to_tmpl': ('warped_image', nifti_gz_format)},
#             wall_time=25,
#             requirements=[ants_req.v('2.0')])
#
#         select_trans = pipeline.add(
#             'select',
#             SelectOne(
#                 index=1),
#             inputs={
#                 'inlist': (ants_reg, 'forward_transforms')},
#             outputs={
#                 'coreg_to_tmpl_ants_mat': ('out', text_matrix_format)})
#
#         pipeline.add(
#             'select_warp',
#             SelectOne(
#                 index=0),
#             inputs={
#                 'inlist': (ants_reg, 'forward_transforms')},
#             outputs={
#                 'coreg_to_tmpl_ants_warp': ('out', nifti_gz_format)})
#
#         pipeline.add(
#             'slices',
#             FSLSlices(
#                 outname='coreg_to_tmpl_report'),
#             inputs={
#                 'im1': ('template', nifti_gz_format),
#                 'im2': (select_trans, 'out')},
#             outputs={
#                 'coreg_to_tmpl_fsl_report': ('report', gif_format)},
#             wall_time=1,
#             requirements=[fsl_req.v('5.0.8')])

        return pipeline

    def prepare_pipeline(self, **name_maps):
        """
        Performs basic preprocessing, such as swapping dimensions into
        standard orientation and resampling (if required)

        Parameters
        -------
        new_dims : tuple(str)[3]
            A 3-tuple with the new orientation of the image (see FSL
            swap dim)
        resolution : list(float)[3] | None
            New resolution of the image. If None no resampling is
            performed
        """
        pipeline = self.new_pipeline(
            name='prepare_pipeline',
            name_maps=name_maps,
            desc=("Dimensions swapping to ensure that all the images "
                  "have the same orientations."),
            citations=[fsl_cite])

        if (self.branch('reorient_to_std') or
                self.parameter('resampled_resolution') is not None):
            if self.branch('reorient_to_std'):
                swap = pipeline.add(
                    'fslreorient2std',
                    fsl.utils.Reorient2Std(
                        output_type='NIFTI_GZ'),
                    inputs={
                        'in_file': ('magnitude', nifti_gz_format)},
                    requirements=[fsl_req.v('5.0.9')])
    #         swap.inputs.new_dims = self.parameter('reoriented_dims')

            if self.parameter('resampled_resolution') is not None:
                # If reorientation was skipped, resample the raw magnitude
                # image directly
                resample = pipeline.add(
                    "resample",
                    MRResize(
                        voxel=self.parameter('resampled_resolution')),
                    inputs={
                        'in_file': ((swap, 'out_file')
                                    if self.branch('reorient_to_std')
                                    else ('magnitude', nifti_gz_format))},
                    requirements=[mrtrix_req.v('3.0rc3')])
                pipeline.connect_output('mag_preproc', resample, 'out_file',
                                        nifti_gz_format)
            else:
                pipeline.connect_output('mag_preproc', swap, 'out_file',
                                        nifti_gz_format)
        else:
            # Don't actually do any processing just copy magnitude image to
            # preproc
            pipeline.add(
                'identity',
                IdentityInterface(
                    ['file']),
                inputs={
                    'file': ('magnitude', nifti_gz_format)},
                outputs={
                    'mag_preproc': ('file', nifti_gz_format)})

        return pipeline

    def header_extraction_pipeline(self, **name_maps):

        pipeline = self.new_pipeline(
            name='header_extraction',
            name_maps=name_maps,
            desc=("Pipeline to extract the most important scan "
                  "information from the image header"),
            citations=[])

        input_format = self.input(self.header_image_spec_name).format

        if input_format == dicom_format:

            pipeline.add(
                'hd_info_extraction',
                DicomHeaderInfoExtraction(
                    multivol=False),
                inputs={
                    'dicom_folder': (self.header_image_spec_name, dicom_format)},
                outputs={
                    'tr': ('tr', float),
                    'start_time': ('start_time', str),
                    'total_duration': ('total_duration', str),
                    'real_duration': ('real_duration', str),
                    'ped': ('ped', str),
                    'pe_angle': ('pe_angle', str),
                    'echo_times': ('echo_times', float),
                    'voxel_sizes': ('voxel_sizes', float),
                    'main_field_strength': ('B0', float),
                    'main_field_orient': ('H', float)})

        elif input_format == nifti_gz_x_format:

            pipeline.add(
                'hd_info_extraction',
                NiftixHeaderInfoExtraction(),
                inputs={
                    'in_file': (self.header_image_spec_name, nifti_gz_x_format)},
                outputs={
                    'tr': ('tr', float),
                    'start_time': ('start_time', str),
                    'total_duration': ('total_duration', str),
                    'real_duration': ('real_duration', str),
                    'ped': ('ped', str),
                    'pe_angle': ('pe_angle', str),
                    'echo_times': ('echo_times', float),
                    'voxel_sizes': ('voxel_sizes', float),
                    'main_field_strength': ('B0', float),
                    'main_field_orient': ('H', float)})
        else:
            raise BananaUsageError(
                "Can only extract header info if 'magnitude' fileset "
                "is provided in DICOM or extended NIfTI format (provided {})"
                .format(self.input(self.header_image_spec_name).format))

        return pipeline

    def motion_mat_pipeline(self, **name_maps):

        pipeline = self.new_pipeline(
            name='motion_mat_calculation',
            name_maps=name_maps,
            desc=("Motion matrices calculation"),
            citations=[fsl_cite])

        mm = pipeline.add(
            'motion_mats',
            MotionMatCalculation(),
            outputs={
                'motion_mats': ('motion_mats', motion_mats_format)})
        if not self.spec('coreg_fsl_mat').derivable:
            logger.info("Cannot derive 'coreg_matrix' for {} required for "
                        "motion matrix calculation, assuming that it "
                        "is the reference study".format(self))
            mm.inputs.reference = True
            pipeline.connect_input('magnitude', mm, 'dummy_input')
        else:
            pipeline.connect_input('coreg_fsl_mat', mm, 'reg_mat',
                                   text_matrix_format)
            pipeline.connect_input('qform_mat', mm, 'qform_mat',
                                   text_matrix_format)
            if 'align_mats' in self.data_spec_names():
                pipeline.connect_input('align_mats', mm, 'align_mats',
                                       motion_mats_format)
        return pipeline
Example No. 26
class BoldStudy(EpiSeriesStudy, metaclass=StudyMetaClass):

    desc = "Functional MRI BOLD MRI contrast"

    add_data_specs = [
        InputFilesetSpec('train_data',
                         rfile_format,
                         optional=True,
                         frequency='per_study'),
        FilesetSpec('hand_label_noise', text_format,
                    'fix_preparation_pipeline'),
        FilesetSpec('labelled_components', text_format,
                    'fix_classification_pipeline'),
        FilesetSpec('cleaned_file', nifti_gz_format,
                    'fix_regression_pipeline'),
        FilesetSpec('filtered_data', nifti_gz_format,
                    'rsfMRI_filtering_pipeline'),
        FilesetSpec('mc_par', par_format, 'rsfMRI_filtering_pipeline'),
        FilesetSpec('melodic_ica', zip_format,
                    'single_subject_melodic_pipeline'),
        FilesetSpec('fix_dir', zip_format, 'fix_preparation_pipeline'),
        FilesetSpec('normalized_ts', nifti_gz_format,
                    'timeseries_normalization_to_atlas_pipeline'),
        FilesetSpec('smoothed_ts', nifti_gz_format, 'smoothing_pipeline')
    ]

    add_param_specs = [
        ParamSpec('component_threshold', 20),
        ParamSpec('motion_reg', True),
        ParamSpec('highpass', 0.01),
        ParamSpec('brain_thresh_percent', 5),
        ParamSpec('group_ica_components', 15)
    ]

    primary_bids_selector = BidsInputs(spec_name='series',
                                       type='bold',
                                       valid_formats=(nifti_gz_x_format,
                                                      nifti_gz_format))

    default_bids_inputs = [
        primary_bids_selector,
        BidsAssocInput(spec_name='field_map_phase',
                       primary=primary_bids_selector,
                       association='phasediff',
                       format=nifti_gz_format,
                       drop_if_missing=True),
        BidsAssocInput(spec_name='field_map_mag',
                       primary=primary_bids_selector,
                       association='phasediff',
                       type='magnitude',
                       format=nifti_gz_format,
                       drop_if_missing=True)
    ]
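
    # Assuming a standard BIDS layout, these selectors would match files
    # along these (illustrative) lines:
    #   sub-01/func/sub-01_task-rest_bold.nii.gz -> 'series'
    #   sub-01/fmap/sub-01_phasediff.nii.gz      -> 'field_map_phase'
    #   sub-01/fmap/sub-01_magnitude1.nii.gz     -> 'field_map_mag'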

    def rsfMRI_filtering_pipeline(self, **name_maps):

        pipeline = self.new_pipeline(
            name='rsfMRI_filtering',
            desc=("Spatial and temporal rsfMRI filtering"),
            citations=[fsl_cite],
            name_maps=name_maps)

        afni_mc = pipeline.add(
            'AFNI_MC',
            Volreg(zpad=1,
                   out_file='rsfmri_mc.nii.gz',
                   oned_file='prefiltered_func_data_mcf.par'),
            inputs={'in_file': ('series_preproc', nifti_gz_format)},
            outputs={'mc_par': ('oned_file', par_format)},
            wall_time=5,
            requirements=[afni_req.v('16.2.10')])

        filt = pipeline.add('Tproject',
                            Tproject(stopband=(0, 0.01),
                                     polort=3,
                                     blur=3,
                                     out_file='filtered_func_data.nii.gz'),
                            inputs={
                                'delta_t': ('tr', float),
                                'mask':
                                (self.brain_mask_spec_name, nifti_gz_format),
                                'in_file': (afni_mc, 'out_file')
                            },
                            wall_time=5,
                            requirements=[afni_req.v('16.2.10')])

        meanfunc = pipeline.add('meanfunc',
                                ImageMaths(op_string='-Tmean',
                                           suffix='_mean',
                                           output_type='NIFTI_GZ'),
                                wall_time=5,
                                inputs={'in_file': (afni_mc, 'out_file')},
                                requirements=[fsl_req.v('5.0.10')])
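
        # 3dTproject removes the temporal mean along with the other
        # nuisance regressors, so the mean functional image is added back
        # to keep the filtered series in a sensible intensity range for
        # MELODIC/FIX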

        pipeline.add('add_mean',
                     ImageMaths(op_string='-add', output_type='NIFTI_GZ'),
                     inputs={
                         'in_file': (filt, 'out_file'),
                         'in_file2': (meanfunc, 'out_file')
                     },
                     outputs={'filtered_data': ('out_file', nifti_gz_format)},
                     wall_time=5,
                     requirements=[fsl_req.v('5.0.10')])

        return pipeline

    def single_subject_melodic_pipeline(self, **name_maps):

        pipeline = self.new_pipeline(
            name='MelodicL1',
            desc=("Single subject ICA analysis using FSL MELODIC."),
            citations=[fsl_cite],
            name_maps=name_maps)

        pipeline.add('melodic_L1',
                     MELODIC(
                         no_bet=True,
                         bg_threshold=self.parameter('brain_thresh_percent'),
                         report=True,
                         out_stats=True,
                         mm_thresh=0.5,
                         out_dir='melodic_ica',
                         output_type='NIFTI_GZ'),
                     inputs={
                         'mask': (self.brain_mask_spec_name, nifti_gz_format),
                         'tr_sec': ('tr', float),
                         'in_files': ('filtered_data', nifti_gz_format)
                     },
                     outputs={'melodic_ica': ('out_dir', directory_format)},
                     wall_time=15,
                     requirements=[fsl_req.v('5.0.10')])

        return pipeline

    def fix_preparation_pipeline(self, **name_maps):

        pipeline = self.new_pipeline(
            name='prepare_fix',
            desc=("Pipeline to create the right folder structure before "
                  "running FIX"),
            citations=[fsl_cite],
            name_maps=name_maps)

        if self.branch('coreg_to_tmpl_method', 'ants'):

            struct_ants2fsl = pipeline.add(
                'struct_ants2fsl',
                ANTs2FSLMatrixConversion(ras2fsl=True),
                inputs={
                    'reference_file': ('template_brain', nifti_gz_format),
                    'itk_file': ('coreg_to_tmpl_ants_mat', text_matrix_format),
                    'source_file': ('coreg_ref_brain', nifti_gz_format)
                },
                requirements=[c3d_req.v('1.0.0')])

            struct_matrix = (struct_ants2fsl, 'fsl_matrix')
        else:
            struct_matrix = ('coreg_to_tmpl_fsl_mat', text_matrix_format)


#         if self.branch('coreg_method', 'ants'):
#         epi_ants2fsl = pipeline.add(
#             'epi_ants2fsl',
#             ANTs2FSLMatrixConversion(
#                 ras2fsl=True),
#             inputs={
#                 'source_file': ('brain', nifti_gz_format),
#                 'itk_file': ('coreg_ants_mat', text_matrix_format),
#                 'reference_file': ('coreg_ref_brain', nifti_gz_format)},
#             requirements=[c3d_req.v('1.0.0')])

        MNI2t1 = pipeline.add('MNI2t1',
                              ConvertXFM(invert_xfm=True),
                              inputs={'in_file': struct_matrix},
                              wall_time=5,
                              requirements=[fsl_req.v('5.0.9')])

        struct2epi = pipeline.add(
            'struct2epi',
            ConvertXFM(invert_xfm=True),
            inputs={'in_file': ('coreg_fsl_mat', text_matrix_format)},
            wall_time=5,
            requirements=[fsl_req.v('5.0.9')])

        meanfunc = pipeline.add(
            'meanfunc',
            ImageMaths(op_string='-Tmean',
                       suffix='_mean',
                       output_type='NIFTI_GZ'),
            inputs={'in_file': ('series_preproc', nifti_gz_format)},
            wall_time=5,
            requirements=[fsl_req.v('5.0.9')])

        pipeline.add('prep_fix',
                     PrepareFIX(),
                     inputs={
                         'melodic_dir': ('melodic_ica', directory_format),
                         't1_brain': ('coreg_ref_brain', nifti_gz_format),
                         'mc_par': ('mc_par', par_format),
                         'epi_brain_mask': ('brain_mask', nifti_gz_format),
                         'epi_preproc': ('series_preproc', nifti_gz_format),
                         'filtered_epi': ('filtered_data', nifti_gz_format),
                         'epi2t1_mat': ('coreg_fsl_mat', text_matrix_format),
                         't12MNI_mat': struct_matrix,
                         'MNI2t1_mat': (MNI2t1, 'out_file'),
                         't12epi_mat': (struct2epi, 'out_file'),
                         'epi_mean': (meanfunc, 'out_file')
                     },
                     outputs={
                         'fix_dir': ('fix_dir', directory_format),
                         'hand_label_noise': ('hand_label_file', text_format)
                     })

        return pipeline

    def fix_classification_pipeline(self, **name_maps):

        pipeline = self.new_pipeline(
            name='fix_classification',
            desc=("Automatic classification of noisy components from the "
                  "rsfMRI data using fsl FIX."),
            citations=[fsl_cite],
            name_maps=name_maps)

        pipeline.add(
            "fix",
            FSLFIX(component_threshold=self.parameter('component_threshold'),
                   motion_reg=self.parameter('motion_reg'),
                   classification=True),
            inputs={
                "feat_dir": ("fix_dir", directory_format),
                "train_data": ("train_data", rfile_format)
            },
            outputs={'labelled_components': ('label_file', text_format)},
            wall_time=30,
            requirements=[fsl_req.v('5.0.9'),
                          fix_req.v('1.0')])

        return pipeline

    def fix_regression_pipeline(self, **name_maps):

        pipeline = self.new_pipeline(
            name='signal_regression',
            desc=("Regression of the noisy components from the rsfMRI data "
                  "using a python implementation equivalent to that in FIX."),
            citations=[fsl_cite],
            name_maps=name_maps)

        pipeline.add(
            "signal_reg",
            SignalRegression(motion_regression=self.parameter('motion_reg'),
                             highpass=self.parameter('highpass')),
            inputs={
                "fix_dir": ("fix_dir", directory_format),
                "labelled_components": ("labelled_components", text_format)
            },
            outputs={'cleaned_file': ('output', nifti_gz_format)},
            wall_time=30,
            requirements=[fsl_req.v('5.0.9'),
                          fix_req.v('1.0')])

        return pipeline

    def timeseries_normalization_to_atlas_pipeline(self, **name_maps):

        pipeline = self.new_pipeline(
            name='timeseries_normalization_to_atlas_pipeline',
            desc=("Apply ANTs transformation to the fmri filtered file to "
                  "normalize it to MNI 2mm."),
            citations=[fsl_cite],
            name_maps=name_maps)

        merge_trans = pipeline.add('merge_transforms',
                                   NiPypeMerge(3),
                                   inputs={
                                       'in1': ('coreg_to_tmpl_ants_warp',
                                               nifti_gz_format),
                                       'in2': ('coreg_to_tmpl_ants_mat',
                                               text_matrix_format),
                                       'in3':
                                       ('coreg_matrix', text_matrix_format)
                                   },
                                   wall_time=1)
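
        # antsApplyTransforms applies the merged stack in reverse list
        # order: the epi->T1 matrix first, then the T1->template affine,
        # then the nonlinear warp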

        pipeline.add(
            'ApplyTransform',
            ApplyTransforms(interpolation='Linear', input_image_type=3),
            inputs={
                'reference_image': ('template_brain', nifti_gz_format),
                'input_image': ('cleaned_file', nifti_gz_format),
                'transforms': (merge_trans, 'out')
            },
            outputs={'normalized_ts': ('output_image', nifti_gz_format)},
            wall_time=7,
            mem_gb=24,
            requirements=[ants_req.v('2')])

        return pipeline

    def smoothing_pipeline(self, **name_maps):

        pipeline = self.new_pipeline(
            name='smoothing_pipeline',
            desc=("Spatial smoothing of the normalized fmri file"),
            citations=[fsl_cite],
            name_maps=name_maps)

        pipeline.add('3dBlurToFWHM',
                     BlurToFWHM(fwhm=5, out_file='smoothed_ts.nii.gz'),
                     inputs={
                         'mask': ('template_mask', nifti_gz_format),
                         'in_file': ('normalized_ts', nifti_gz_format)
                     },
                     outputs={'smoothed_ts': ('out_file', nifti_gz_format)},
                     wall_time=5,
                     requirements=[afni_req.v('16.2.10')])

        return pipeline
Example No. 27
class DwiRefStudy(EpiStudy, metaclass=StudyMetaClass):

    add_data_specs = [
        InputFilesetSpec('reverse_phase', STD_IMAGE_FORMATS, optional=True)
    ]

    desc = ("A special study used in the MR-PET motion correction algorithm to"
            " perform distortion correction on the reverse-phase/reference b0 "
            "scans by flipping it around and using the DWI series as the "
            "reference")

    def preprocess_pipeline(self, **name_maps):

        if self.provided('reverse_phase'):
            return self._topup_pipeline(**name_maps)
        else:
            return super().preprocess_pipeline(**name_maps)

    def _topup_pipeline(self, **name_maps):
        """
        Implementation of a separate topup pipeline, moved from the EPI
        study as it is only really relevant for spin-echo DWI. Need to work
        out what to do with it.
        """

        pipeline = self.new_pipeline(
            name='preprocess_pipeline',
            desc=("Topup distortion correction pipeline"),
            citations=[fsl_cite],
            name_maps=name_maps)

        reorient_epi_in = pipeline.add(
            'reorient_epi_in',
            fsl.utils.Reorient2Std(),
            inputs={
                'in_file': ('magnitude', nifti_gz_format)},
            requirements=[fsl_req.v('5.0.9')])

        reorient_epi_opposite = pipeline.add(
            'reorient_epi_opposite',
            fsl.utils.Reorient2Std(),
            inputs={
                'in_file': ('reverse_phase', nifti_gz_format)},
            requirements=[fsl_req.v('5.0.9')])

        prep_dwi = pipeline.add(
            'prepare_dwi',
            PrepareDWI(
                topup=True),
            inputs={
                'pe_dir': ('ped', str),
                'ped_polarity': ('pe_angle', str),
                'dwi': (reorient_epi_in, 'out_file'),
                'dwi1': (reorient_epi_opposite, 'out_file')})

        ped = pipeline.add(
            'gen_config',
            GenTopupConfigFiles(),
            inputs={
                'ped': (prep_dwi, 'pe')})

        merge_outputs = pipeline.add(
            'merge_files',
            merge_lists(2),
            inputs={
                'in1': (prep_dwi, 'main'),
                'in2': (prep_dwi, 'secondary')})

        merge = pipeline.add(
            'FslMerge',
            FslMerge(
                dimension='t',
                output_type='NIFTI_GZ'),
            inputs={
                'in_files': (merge_outputs, 'out')},
            requirements=[fsl_req.v('5.0.9')])
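
        # TOPUP estimates the susceptibility-induced off-resonance field
        # from the pair of opposed phase-encoding images merged along t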

        topup = pipeline.add(
            'topup',
            TOPUP(
                output_type='NIFTI_GZ'),
            inputs={
                'in_file': (merge, 'merged_file'),
                'encoding_file': (ped, 'config_file')},
            requirements=[fsl_req.v('5.0.9')])

        in_apply_tp = pipeline.add(
            'in_apply_tp',
            merge_lists(1),
            inputs={
                'in1': (reorient_epi_in, 'out_file')})

        pipeline.add(
            'applytopup',
            ApplyTOPUP(
                method='jac',
                in_index=[1],
                output_type='NIFTI_GZ'),
            inputs={
                'in_files': (in_apply_tp, 'out'),
                'encoding_file': (ped, 'apply_topup_config'),
                'in_topup_movpar': (topup, 'out_movpar'),
                'in_topup_fieldcoef': (topup, 'out_fieldcoef')},
            outputs={
                'mag_preproc': ('out_corrected', nifti_gz_format)},
            requirements=[fsl_req.v('5.0.9')])

        return pipeline
Example No. 28
class TestDerivableStudy(with_metaclass(StudyMetaClass, Study)):

    add_data_specs = [
        InputFilesetSpec('required', text_format),
        InputFilesetSpec('optional', text_format, optional=True),
        FilesetSpec('derivable', text_format, 'pipeline1'),
        FilesetSpec('missing_input', text_format, 'pipeline2'),
        FilesetSpec('another_derivable', text_format, 'pipeline3'),
        FilesetSpec('requires_switch', text_format, 'pipeline3'),
        FilesetSpec('requires_switch2', text_format, 'pipeline4'),
        FilesetSpec('requires_foo', text_format, 'pipeline5'),
        FilesetSpec('requires_bar', text_format, 'pipeline5')
    ]

    add_param_specs = [
        SwitchSpec('switch', False),
        SwitchSpec('branch', 'foo', ('foo', 'bar', 'wee'))
    ]

    def pipeline1(self, **name_maps):
        pipeline = self.new_pipeline('pipeline1',
                                     desc="",
                                     citations=[],
                                     name_maps=name_maps)
        identity = pipeline.add('identity', IdentityInterface(['a']))
        pipeline.connect_input('required', identity, 'a')
        pipeline.connect_output('derivable', identity, 'a')
        return pipeline

    def pipeline2(self, **name_maps):
        pipeline = self.new_pipeline('pipeline2',
                                     desc="",
                                     citations=[],
                                     name_maps=name_maps)
        identity = pipeline.add('identity', IdentityInterface(['a', 'b']))
        pipeline.connect_input('required', identity, 'a')
        pipeline.connect_input('optional', identity, 'b')
        pipeline.connect_output('missing_input', identity, 'a')
        return pipeline

    def pipeline3(self, **name_maps):
        pipeline = self.new_pipeline('pipeline3',
                                     desc="",
                                     citations=[],
                                     name_maps=name_maps)
        identity = pipeline.add('identity', IdentityInterface(['a', 'b']))
        pipeline.connect_input('required', identity, 'a')
        pipeline.connect_input('required', identity, 'b')
        pipeline.connect_output('another_derivable', identity, 'a')
        if self.branch('switch'):
            pipeline.connect_output('requires_switch', identity, 'b')
        return pipeline

    def pipeline4(self, **name_maps):
        pipeline = self.new_pipeline('pipeline4',
                                     desc="",
                                     citations=[],
                                     name_maps=name_maps)
        identity = pipeline.add('identity', IdentityInterface(['a']))
        pipeline.connect_input('requires_switch', identity, 'a')
        pipeline.connect_output('requires_switch2', identity, 'a')
        return pipeline

    def pipeline5(self, **name_maps):
        pipeline = self.new_pipeline('pipeline5',
                                     desc="",
                                     citations=[],
                                     name_maps=name_maps)
        identity = pipeline.add('identity', IdentityInterface(['a']))
        pipeline.connect_input('required', identity, 'a')
        if self.branch('branch', 'foo'):
            pipeline.connect_output('requires_foo', identity, 'a')
        elif self.branch('branch', 'bar'):
            pipeline.connect_output('requires_bar', identity, 'a')
        else:
            self.unhandled_branch('branch')
        return pipeline
Example No. 29
class PetStudy(Study, metaclass=StudyMetaClass):

    add_param_specs = [
        ParamSpec('ica_n_components', 2),
        ParamSpec('ica_type', 'spatial'),
        ParamSpec('norm_transformation', 's'),
        ParamSpec('norm_dim', 3),
        ParamSpec('norm_template',
                  os.path.join(template_path, 'PET_template.nii.gz')),
        ParamSpec('crop_xmin', 100),
        ParamSpec('crop_xsize', 130),
        ParamSpec('crop_ymin', 100),
        ParamSpec('crop_ysize', 130),
        ParamSpec('crop_zmin', 20),
        ParamSpec('crop_zsize', 100),
        ParamSpec('image_orientation_check', False)
    ]

    add_data_specs = [
        InputFilesetSpec('list_mode', list_mode_format),
        InputFilesetSpec('registered_volumes', nifti_gz_format),
        InputFilesetSpec('pet_image', nifti_gz_format),
        InputFilesetSpec('pet_data_dir', directory_format),
        InputFilesetSpec('pet_recon_dir', directory_format),
        FilesetSpec('pet_recon_dir_prepared', directory_format,
                    'pet_data_preparation_pipeline'),
        FilesetSpec('decomposed_file', nifti_gz_format, 'ICA_pipeline'),
        FilesetSpec('timeseries', nifti_gz_format, 'ICA_pipeline'),
        FilesetSpec('mixing_mat', text_format, 'ICA_pipeline'),
        FilesetSpec('registered_volume', nifti_gz_format,
                    'Image_normalization_pipeline'),
        FilesetSpec('warp_file', nifti_gz_format,
                    'Image_normalization_pipeline'),
        FilesetSpec('invwarp_file', nifti_gz_format,
                    'Image_normalization_pipeline'),
        FilesetSpec('affine_mat', text_matrix_format,
                    'Image_normalization_pipeline'),
        FieldSpec('pet_duration', int, 'pet_time_info_extraction_pipeline'),
        FieldSpec('pet_end_time', str, 'pet_time_info_extraction_pipeline'),
        FieldSpec('pet_start_time', str, 'pet_time_info_extraction_pipeline'),
        InputFieldSpec('time_offset', int),
        InputFieldSpec('temporal_length', float),
        InputFieldSpec('num_frames', int),
        FilesetSpec('ssrb_sinograms', directory_format,
                    'sinogram_unlisting_pipeline')
    ]

    def ICA_pipeline(self, **kwargs):

        pipeline = self.new_pipeline(
            name='ICA',
            desc=('Decompose a 4D fileset into a set of independent '
                  'components using FastICA'),
            citations=[],
            **kwargs)

        pipeline.add(
            'ICA',
            FastICA(n_components=self.parameter('ica_n_components'),
                    ica_type=self.parameter('ica_type')),
            inputs={'volume': ('registered_volumes', nifti_gz_format)},
            outputs={
                'decomposed_file': ('ica_decomposition', nifti_gz_format),
                'timeseries': ('ica_timeseries', nifti_gz_format),
                'mixing_mat': ('mixing_mat', text_format)
            })

        return pipeline

    def Image_normalization_pipeline(self, **kwargs):

        pipeline = self.new_pipeline(
            name='Image_registration',
            desc=('Image registration to a template using ANTs'),
            citations=[],
            **kwargs)

        pipeline.add('ANTs',
                     AntsRegSyn(
                         out_prefix='vol2template',
                         num_dimensions=self.parameter('norm_dim'),
                         num_threads=self.processor.num_processes,
                         transformation=self.parameter('norm_transformation'),
                         ref_file=self.parameter('norm_template')),
                     inputs={'input_file': ('pet_image', nifti_gz_format)},
                     outputs={
                         'registered_volume': ('reg_file', nifti_gz_format),
                         'warp_file': ('warp_file', nifti_gz_format),
                         'invwarp_file': ('inv_warp', nifti_gz_format),
                         'affine_mat': ('regmat', text_matrix_format)
                     })

        return pipeline

    def pet_data_preparation_pipeline(self, **kwargs):

        pipeline = self.new_pipeline(
            name='pet_data_preparation',
            desc=("Given a folder with reconstructed PET data, this "
                  "pipeline will prepare the data for the motion "
                  "correction"),
            citations=[],
            **kwargs)

        pipeline.add('prepare_pet',
                     PreparePetDir(image_orientation_check=self.parameter(
                         'image_orientation_check')),
                     inputs={'pet_dir': ('pet_recon_dir', directory_format)},
                     outputs={
                         'pet_recon_dir_prepared':
                         ('pet_dir_prepared', directory_format)
                     },
                     requirements=[mrtrix_req.v('3.0rc3'),
                                   fsl_req.v('5.0.9')])

        return pipeline

    def pet_time_info_extraction_pipeline(self, **kwargs):

        pipeline = self.new_pipeline(
            name='pet_info_extraction',
            desc=("Extract PET time info from list-mode header."),
            citations=[],
            **kwargs)

        pipeline.add(
            'PET_time_info',
            PetTimeInfo(),
            inputs={'pet_data_dir': ('pet_data_dir', directory_format)},
            outputs={
                'pet_end_time': ('pet_end_time', str),
                'pet_start_time': ('pet_start_time', str),
                'pet_duration': ('pet_duration', int)
            })
        return pipeline

    def sinogram_unlisting_pipeline(self, **kwargs):

        pipeline = self.new_pipeline(
            name='prepare_sinogram',
            desc=('Unlist pet listmode data into several sinograms and '
                  'perform ssrb compression to prepare data for motion '
                  'detection using PCA pipeline.'),
            citations=[],
            **kwargs)

        if not self.provided('list_mode'):
            raise BananaUsageError(
                "'list_mode' was not provided as an input to the study "
                "so cannot perform sinogram unlisting")

        prepare_inputs = pipeline.add('prepare_inputs',
                                      PrepareUnlistingInputs(),
                                      inputs={
                                          'list_mode':
                                          ('list_mode', list_mode_format),
                                          'time_offset': ('time_offset', int),
                                          'num_frames': ('num_frames', int),
                                          'temporal_len':
                                          ('temporal_length', float)
                                      })

        unlisting = pipeline.add(
            'unlisting',
            PETListModeUnlisting(),
            inputs={'list_inputs': (prepare_inputs, 'out')},
            iterfield=['list_inputs'])

        ssrb = pipeline.add(
            'ssrb',
            SSRB(),
            inputs={'unlisted_sinogram': (unlisting, 'pet_sinogram')},
            requirements=[stir_req.v('3.0')])
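
        # Join node: collects the SSRB sinograms from every unlisted
        # frame back into a single output directory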

        pipeline.add(
            'merge_sinograms',
            MergeUnlistingOutputs(),
            inputs={'sinograms': (ssrb, 'ssrb_sinograms')},
            outputs={'ssrb_sinograms': ('sinogram_folder', directory_format)},
            joinsource='unlisting',
            joinfield=['sinograms'])

        return pipeline
Example No. 30
class DynamicPetStudy(PetStudy, metaclass=StudyMetaClass):

    add_data_specs = [
        InputFilesetSpec('pet_volumes', nifti_gz_format),
        InputFilesetSpec('regression_map', nifti_gz_format),
        FilesetSpec('pet_image', nifti_gz_format, 'Extract_vol_pipeline'),
        FilesetSpec('registered_volumes', nifti_gz_format,
                    'ApplyTransform_pipeline'),
        FilesetSpec('detrended_volumes', nifti_gz_format,
                    'Baseline_Removal_pipeline'),
        FilesetSpec('spatial_map', nifti_gz_format,
                    'Dual_Regression_pipeline'),
        FilesetSpec('ts', png_format, 'Dual_Regression_pipeline')
    ]

    add_param_specs = [
        ParamSpec('trans_template',
                  os.path.join(template_path, 'PET_template.nii.gz')),
        ParamSpec('base_remove_th', 0),
        ParamSpec('base_remove_binarize', False),
        ParamSpec('regress_th', 0),
        ParamSpec('regress_binarize', False)
    ]

    primary_scan_name = 'pet_volumes'

    def Extract_vol_pipeline(self, **kwargs):

        pipeline = self.new_pipeline(
            name='Extract_volume',
            desc=('Extract the last volume of the 4D PET timeseries'),
            citations=[],
            **kwargs)

        pipeline.add('fslroi',
                     ExtractROI(roi_file='vol.nii.gz', t_min=79, t_size=1),
                     inputs={'in_file': ('pet_volumes', nifti_gz_format)},
                     outputs={'pet_image': ('roi_file', nifti_gz_format)})

        return pipeline

    def ApplyTransform_pipeline(self, **kwargs):

        pipeline = self.new_pipeline(
            name='applytransform',
            desc=('Apply transformation to the 4D PET timeseries'),
            citations=[],
            **kwargs)

        merge_trans = pipeline.add('merge_transforms',
                                   Merge(2),
                                   inputs={
                                       'in1': ('warp_file', nifti_gz_format),
                                       'in2':
                                       ('affine_mat', text_matrix_format)
                                   })

        pipeline.add(
            'ApplyTransform',
            ApplyTransforms(reference_image=self.parameter('trans_template'),
                            interpolation='Linear',
                            input_image_type=3),
            inputs={
                'input_image': ('pet_volumes', nifti_gz_format),
                'transforms': (merge_trans, 'out')
            },
            outputs={'registered_volumes': ('output_image', nifti_gz_format)})

        return pipeline

    def Baseline_Removal_pipeline(self, **kwargs):

        pipeline = self.new_pipeline(name='Baseline_removal',
                                     desc=('Baseline (global trend) removal'),
                                     citations=[],
                                     **kwargs)

        pipeline.add(
            'Baseline_removal',
            GlobalTrendRemoval(),
            inputs={'volume': ('registered_volumes', nifti_gz_format)},
            outputs={'detrended_volumes': ('detrended_file', nifti_gz_format)})

        return pipeline

    def Dual_Regression_pipeline(self, **kwargs):

        pipeline = self.new_pipeline(name='Dual_regression',
                                     desc=('PET dual regression'),
                                     citations=[],
                                     **kwargs)

        pipeline.add('PET_dr',
                     PETdr(threshold=self.parameter('regress_th'),
                           binarize=self.parameter('regress_binarize')),
                     inputs={
                         'volume': ('detrended_volumes', nifti_gz_format),
                         'regression_map': ('regression_map', nifti_gz_format)
                     },
                     outputs={
                         'spatial_map': ('spatial_map', nifti_gz_format),
                         'ts': ('timecourse', png_format)
                     })

        return pipeline