Example #1
    def workflow(self):

        images = self.images
        rois = self.rois
        datasource = self.data_source
        dict_sequences = self.dict_sequences
        nipype_cache = self.nipype_cache
        result_dir = self.result_dir
        sub_id = self.sub_id

        toextract = {**dict_sequences['MR-RT'], **dict_sequences['OT']}
        workflow = nipype.Workflow('features_extraction_workflow',
                                   base_dir=nipype_cache)
        datasink = nipype.Node(nipype.DataSink(base_directory=result_dir),
                               "datasink")
        substitutions = [('subid', sub_id)]
        substitutions += [('results/', '{}/'.format(self.workflow_name))]

        for key in toextract:
            session = toextract[key]
            if session['scans'] is not None:
                scans = session['scans']
                reg_scans = [x for x in scans if x.endswith('_reg')]
                segmented_masks = [x for x in scans if x in ['GTVPredicted',
                                                             'TumorPredicted',
                                                             'GTVPredicted-2modalities']]
                add_scans = [x for x in scans if x in images]
                add_masks = [x for x in scans if x in rois]
                
                for image in reg_scans:
                    for roi in segmented_masks:
                        image_name = '{}_{}_reg'.format(key, image.split('_')[0])
                        roi_name = '{}_{}'.format(key, roi.split('.nii.gz')[0])
                        features = nipype.Node(
                            interface=FeatureExtraction(),
                            name='features_extraction_{}{}'.format(image_name, roi_name))
                        features.inputs.parameter_file = '/home/fsforazz/git/core/resources/Params_MR.yaml'
                        workflow.connect(datasource, image_name, features, 'input_image')
                        workflow.connect(datasource, roi_name, features, 'rois')
                        workflow.connect(features, 'feature_files', datasink,
                                         'results.subid.{0}.@csv_file_{1}{2}'.format(
                                             key, image_name, roi_name))
                for image in add_scans:
                    for roi in add_masks:
                        image_name = '{}_{}'.format(key, image)
                        roi_name = '{}_{}'.format(key, roi.split('.nii.gz')[0])
                        features = nipype.Node(
                            interface=FeatureExtraction(),
                            name='features_extraction_{}{}'.format(image_name, roi_name))
                        features.inputs.parameter_file = '/home/fsforazz/git/core/resources/Params_MR.yaml'
                        workflow.connect(datasource, image_name, features, 'input_image')
                        workflow.connect(datasource, roi_name, features, 'rois')
                        workflow.connect(features, 'feature_files', datasink,
                                         'results.subid.{0}.@csv_file_{1}{2}'.format(
                                             key, image_name, roi_name))

        datasink.inputs.substitutions = substitutions

        return workflow
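Note: Example #1, like all the snippets on this page, only builds the graph; nothing executes until run() is called on the returned workflow. A minimal, self-contained sketch of driving a nipype workflow (node names and the cache path are placeholders, not taken from the example):

import nipype
from nipype.interfaces.utility import IdentityInterface

# Toy graph standing in for the returned feature-extraction workflow.
wf = nipype.Workflow('demo_workflow', base_dir='/tmp/nipype_cache')
node = nipype.Node(IdentityInterface(fields=['x']), name='identity')
node.inputs.x = 1
wf.add_nodes([node])

# MultiProc runs independent nodes in parallel, which suits
# per-scan fan-out graphs like the one above.
wf.run(plugin='MultiProc', plugin_args={'n_procs': 4})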
Example #2
def run_mean_correl():

    main_workflow = pe.Workflow(name=mean_spectral_permut_analysis_name)
    main_workflow.base_dir = main_path

    #### infosource

    infosource = create_infosource()

    #### Data source
    #datasource = create_datasource_rada_by_reg_memory_signif_conf()
    datasource = create_datasource_correl()

    main_workflow.connect(infosource, 'freq_band_name', datasource,
                          'freq_band_name')

    #### prepare_mean_correl
    prepare_mean_correl = pe.Node(interface=PrepareMeanCorrel(),
                                  name='prepare_mean_correl')

    #prepare_mean_correl.inputs.gm_mask_coords_file = ref_coords_file
    prepare_mean_correl.inputs.gm_mask_labels_file = ref_labels_file

    main_workflow.connect(datasource, ('Z_cor_mat_files', force_list),
                          prepare_mean_correl, 'cor_mat_files')
    main_workflow.connect(datasource, ('labels_files', force_list),
                          prepare_mean_correl, 'labels_files')
    #main_workflow.connect(datasource, ('coords_files',force_list),prepare_mean_correl,'coords_files')

    ### shuffle matrix
    shuffle_matrix = pe.Node(interface=ShuffleMatrix(), name='shuffle_matrix')

    main_workflow.connect(prepare_mean_correl, 'avg_cor_mat_matrix_file',
                          shuffle_matrix, 'original_matrix_file')
    main_workflow.connect(infosource, 'permut', shuffle_matrix, 'seed')

    ################################################ modular decomposition on norm_coclass ############################################

    if 'rada' in mean_spectral_permut_analysis_name.split('_'):

        graph_den_pipe = create_pipeline_conmat_to_graph_density(
            pipeline_name="graph_den_pipe",
            main_path=main_path,
            multi=False,
            con_den=mean_con_den,
            mod=True,
            plot=False,
            optim_seq=mean_radatools_optim)
        #graph_den_pipe = create_pipeline_conmat_to_graph_density("graph_den_pipe",main_path,multi = False, con_den = con_den)

        main_workflow.connect(shuffle_matrix, 'shuffled_matrix_file',
                              graph_den_pipe, 'inputnode.conmat_file')

        graph_den_pipe.inputs.inputnode.labels_file = ref_labels_file
        graph_den_pipe.inputs.inputnode.coords_file = ref_coords_file

    return main_workflow
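Note the tuple form in the connect() calls above: passing ('Z_cor_mat_files', force_list) runs the named function on the value while it travels along the edge. A minimal, self-contained sketch of that modifier mechanism (the force_list body is an assumption about what the project's helper does):

import nipype
from nipype.interfaces.utility import IdentityInterface

def force_list(val):
    # Assumed behaviour: wrap a single path in a list.
    return val if isinstance(val, list) else [val]

wf = nipype.Workflow('modifier_demo', base_dir='/tmp/nipype_cache')
src = nipype.Node(IdentityInterface(fields=['f']), name='src')
src.inputs.f = '/data/mat.npy'
dst = nipype.Node(IdentityInterface(fields=['files']), name='dst')
# The (output, function) tuple applies force_list to the value in transit.
wf.connect(src, ('f', force_list), dst, 'files')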
Example #3
    def workflow(self):

        #         self.datasource()

        datasource = self.data_source
        dict_sequences = self.dict_sequences
        nipype_cache = self.nipype_cache
        result_dir = self.result_dir
        sub_id = self.sub_id

        tobet = {**dict_sequences['MR-RT'], **dict_sequences['OT']}
        workflow = nipype.Workflow('brain_extraction_workflow',
                                   base_dir=nipype_cache)
        datasink = nipype.Node(nipype.DataSink(base_directory=result_dir),
                               "datasink")
        substitutions = [('subid', sub_id)]
        substitutions += [('results/', '{}/'.format(self.workflow_name))]
        substitutions += [('_preproc_corrected.', '_preproc.')]
        datasink.inputs.substitutions = substitutions

        for key in tobet:
            files = []
            #             if tobet[key]['ref'] is not None:
            #                 files.append(tobet[key]['ref'])
            if tobet[key]['scans'] is not None:
                files = files + tobet[key]['scans']
            for el in files:
                el = el.strip(self.extention)
                node_name = '{0}_{1}'.format(key, el)
                bet = nipype.Node(interface=HDBet(),
                                  name='{}_bet'.format(node_name),
                                  serial=True)
                bet.inputs.save_mask = 1
                bet.inputs.out_file = '{}_preproc'.format(el)
                reorient = nipype.Node(interface=Reorient2Std(),
                                       name='{}_reorient'.format(node_name))
                if el in TON4:
                    n4 = nipype.Node(interface=N4BiasFieldCorrection(),
                                     name='{}_n4'.format(node_name))
                    workflow.connect(bet, 'out_file', n4, 'input_image')
                    workflow.connect(bet, 'out_mask', n4, 'mask_image')
                    workflow.connect(
                        n4, 'output_image', datasink,
                        'results.subid.{0}.@{1}_preproc'.format(key, el))
                else:
                    workflow.connect(
                        bet, 'out_file', datasink,
                        'results.subid.{0}.@{1}_preproc'.format(key, el))
                workflow.connect(
                    bet, 'out_mask', datasink,
                    'results.subid.{0}.@{1}_preproc_mask'.format(key, el))
                workflow.connect(reorient, 'out_file', bet, 'input_file')
                workflow.connect(datasource, node_name, reorient, 'in_file')

        return workflow
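One caveat with el.strip(self.extention) above: str.strip() removes a *set* of characters from both ends, not a suffix, so any name ending in '.', 'n', 'i', 'g', or 'z' gets over-trimmed. A suffix-safe helper (purely illustrative, not from the project):

def strip_extension(filename, extension='.nii.gz'):
    # Remove the extension only if it is actually a suffix.
    if filename.endswith(extension):
        return filename[:-len(extension)]
    return filename

assert strip_extension('FLAIR_reg.nii.gz') == 'FLAIR_reg'
# whereas 'FLAIR_reg.nii.gz'.strip('.nii.gz') == 'FLAIR_re'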
Example #4
File: bet.py  Project: sforazz/basecore
def brain_extraction(sub_id,
                     datasource,
                     sessions,
                     RESULT_DIR,
                     NIPYPE_CACHE,
                     reference,
                     t10=True):

    bet = nipype.MapNode(interface=HDBet(),
                         iterfield=['input_file'],
                         name='bet')
    bet.inputs.save_mask = 1
    bet.inputs.out_file = 'T1_preproc'

    if t10:
        bet_t10 = nipype.Node(interface=HDBet(), name='t1_0_bet')
        bet_t10.inputs.save_mask = 1
        bet_t10.inputs.out_file = 'T1_0_bet'

    datasink = nipype.Node(nipype.DataSink(base_directory=RESULT_DIR),
                           "datasink")

    substitutions = [('subid', sub_id)]
    for i, session in enumerate(sessions):

        substitutions += [('_bet{}/'.format(i), session + '/')]

    datasink.inputs.substitutions = substitutions
    # Create Workflow
    workflow = nipype.Workflow('brain_extraction_workflow',
                               base_dir=NIPYPE_CACHE)

    workflow.connect(datasource, 't1', bet, 'input_file')
    if t10:
        workflow.connect(datasource, 't1_0', bet_t10, 'input_file')
        workflow.connect(bet_t10, 'out_file', datasink,
                         'results.subid.T10.@T1_ref_bet')

    workflow.connect(bet, 'out_file', datasink, 'results.subid.@T1_preproc')
    workflow.connect(bet, 'out_mask', datasink, 'results.subid.@T1_mask')

    workflow = datasink_base(datasink,
                             datasource,
                             workflow,
                             sessions,
                             reference,
                             t10=t10)

    return workflow
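The substitution list is how every example here maps nipype's cache-flavoured paths onto tidy result folders: DataSink applies the pairs as ordered substring replacements on each output path. In Example #4 the MapNode writes its i-th result under '_bet<i>/', which the loop rewrites to the session name. A pure-Python illustration of the effect (subject and session values are made up):

# Illustration only: DataSink applies these pairs, in order, as
# substring replacements on every path it writes.
substitutions = [('subid', 'sub001')]
for i, session in enumerate(['20190101', '20190601']):
    substitutions += [('_bet{}/'.format(i), session + '/')]

path = 'results/subid/_bet1/T1_preproc.nii.gz'
for old, new in substitutions:
    path = path.replace(old, new)
# path == 'results/sub001/20190601/T1_preproc.nii.gz'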
Example #5
def gbm_datasource(sub_id, BASE_DIR):

    sessions = [x for x in os.listdir(os.path.join(BASE_DIR, sub_id))
                if 'REF' not in x and 'T10' not in x and 'RT_' not in x]
    datasource = nipype.Node(
        interface=nipype.DataGrabber(
            infields=['sub_id', 'sessions', 'ref_ct', 'ref_t1'],
            outfields=['t1', 'ct1', 't2', 'flair', 'reference', 't1_0']),
            name='datasource')
    datasource.inputs.base_directory = BASE_DIR
    datasource.inputs.template = '*'
    datasource.inputs.sort_filelist = True
    datasource.inputs.raise_on_empty = False
    datasource.inputs.field_template = dict(t1='%s/%s/T1.nii.gz', ct1='%s/%s/CT1.nii.gz',
                                            t2='%s/%s/T2.nii.gz', flair='%s/%s/FLAIR.nii.gz',
                                            reference='%s/%s/CT.nii.gz',
                                            t1_0='%s/%s/T1.nii.gz')
    datasource.inputs.template_args = dict(t1=[['sub_id', 'sessions']],
                                           ct1=[['sub_id', 'sessions']],
                                           t2=[['sub_id', 'sessions']],
                                           flair=[['sub_id', 'sessions']],
                                           reference=[['sub_id', 'ref_ct']],
                                           t1_0=[['sub_id', 'ref_t1']])
    datasource.inputs.sub_id = sub_id
    datasource.inputs.sessions = sessions
    datasource.inputs.ref_ct = 'REF'
    datasource.inputs.ref_t1 = 'T10'
    
    return datasource, sessions
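Roughly how DataGrabber resolves each outfield: the '%s' slots in field_template are filled, in order, with the infield values named in template_args, and list-valued infields (sessions here) fan out into one path per element. A simplified, pure-Python illustration of the expansion:

field_template = '%s/%s/T1.nii.gz'          # template for outfield 't1'
template_args = [['sub_id', 'sessions']]    # infields feeding the '%s' slots
sub_id = 'sub001'
sessions = ['20190101', '20190601']

paths = [field_template % (sub_id, sess) for sess in sessions]
# -> ['sub001/20190101/T1.nii.gz', 'sub001/20190601/T1.nii.gz']
# DataGrabber then globs each pattern under base_directory.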
Example #6
def create_datasource_correl():

    datasource = pe.Node(interface=nio.DataGrabber(
        infields=['freq_band_name'],
        outfields=['Z_cor_mat_files', 'coords_files', 'labels_files']),
                         name='datasource')

    datasource.inputs.base_directory = main_path

    datasource.inputs.template = '%s/%s/_freq_band_name_%s_sess_index_*_subject_id_*/%s/%s%s'

    datasource.inputs.template_args = dict(
        Z_cor_mat_files=[[spectral_analysis_name, "ts_to_conmat",
                          'freq_band_name', "spectral", "conmat_0_coh",
                          ".npy"]],
        coords_files=[[spectral_analysis_name, "", 'freq_band_name',
                       "create_ts", "correct_channel_coords", ".txt"]],
        labels_files=[[spectral_analysis_name, "", 'freq_band_name',
                       "create_ts", "correct_channel_names", ".txt"]])

    datasource.inputs.sort_filelist = True

    return datasource
Example #7
def datasink_base(datasink, datasource, workflow, sessions):

    split_ds_nodes = []
    for i in range(len(sequences)):
        split_ds = nipype.Node(interface=Split(), name='split_ds{}'.format(i))
        split_ds.inputs.splits = [1]*len(sessions)
        split_ds_nodes.append(split_ds)


    for i, node in enumerate(split_ds_nodes):
        if len(sessions) > 1:
            workflow.connect(datasource, sequences[i], node,
                             'inlist')
            for j, sess in enumerate(sessions):
                workflow.connect(node, 'out{}'.format(j+1),
                                 datasink, 'results.subid.{0}.@{1}'
                                 .format(sess, sequences[i]))
        else:
            workflow.connect(datasource, sequences[i], datasink,
                             'results.subid.{0}.@{1}'.format(sessions[0],
                                                             sequences[i]))
    workflow.connect(datasource, 'reference', datasink,
                     'results.subid.REF.@ref_ct')

    workflow.connect(datasource, 't1_0', datasink,
                     'results.subid.T10.@ref_t1')
    return workflow
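The Split nodes above undo DataGrabber's per-subject flattening: splits gives the chunk sizes and the chunks come out as out1, out2, ..., so each session's file can be sunk separately. A standalone sketch with placeholder paths:

from nipype.interfaces.utility import Split

split = Split()
split.inputs.inlist = ['sess1/T1.nii.gz', 'sess2/T1.nii.gz']
split.inputs.splits = [1, 1]      # one element per session
res = split.run()
# res.outputs.out1 == ['sess1/T1.nii.gz']
# res.outputs.out2 == ['sess2/T1.nii.gz']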
Example #8
def create_infosource():

    infosource = pe.Node(
        interface=niu.IdentityInterface(fields=['permut', 'freq_band_name']),
        name="infosource")

    infosource.iterables = [('freq_band_name', freq_band_names),
                            ('permut', range(-1, nb_permuts))]
    return infosource
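iterables are the fan-out mechanism here: the engine clones everything downstream of infosource once per parameter combination, i.e. len(freq_band_names) * (nb_permuts + 1) executions, since range(-1, nb_permuts) has nb_permuts + 1 values (the -1 seed presumably marks the unshuffled baseline). A tiny sketch with concrete stand-in values:

from nipype import Node
from nipype.interfaces.utility import IdentityInterface

infosource = Node(IdentityInterface(fields=['permut', 'freq_band_name']),
                  name='infosource')
infosource.iterables = [('freq_band_name', ['alpha', 'beta']),
                        ('permut', list(range(-1, 2)))]
# Downstream nodes would be expanded into 2 * 3 = 6 combinations.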
Example #9
def single_tp_segmentation_datasource(sub_id, BASE_DIR):

    sessions = [
        x for x in os.listdir(os.path.join(BASE_DIR, sub_id))
        if 'REF' not in x and 'T10' not in x and 'RT_' not in x
        and os.path.isdir(os.path.join(BASE_DIR, sub_id, x))
    ]
    ref_session = [
        x for x in os.listdir(os.path.join(BASE_DIR, sub_id))
        if x == 'REF' and os.path.isdir(os.path.join(BASE_DIR, sub_id, x))
    ]
    if ref_session:
        reference = True
    else:
        print('NO REFERENCE CT!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!')
        reference = False

    datasource = nipype.Node(interface=nipype.DataGrabber(
        infields=['sub_id', 'sessions', 'ref_ct'],
        outfields=[
            't1', 'ct1', 't2', 'flair', 'reference', 'reg2t1_warp',
            'reg2t1_mat', 'ct1_preproc', 'flair_preproc', 't1_preproc',
            't2_preproc', 'regT12CT'
        ]),
                             name='datasource')
    datasource.inputs.base_directory = BASE_DIR
    datasource.inputs.template = '*'
    datasource.inputs.sort_filelist = True
    datasource.inputs.raise_on_empty = False
    datasource.inputs.field_template = dict(
        t1='%s/%s/T1.nii.gz',
        ct1='%s/%s/CT1.nii.gz',
        t2='%s/%s/T2.nii.gz',
        flair='%s/%s/FLAIR.nii.gz',
        reference='%s/%s/CT.nii.gz',
        regT12CT='%s/%s/reg2T1_ref.mat',
        ct1_preproc='%s/%s/CT1_preproc.nii.gz',
        t1_preproc='%s/%s/T1_preproc.nii.gz',
        t2_preproc='%s/%s/T2_preproc.nii.gz',
        flair_preproc='%s/%s/FLAIR_preproc.nii.gz')
    datasource.inputs.template_args = dict(
        t1=[['sub_id', 'sessions']],
        ct1=[['sub_id', 'sessions']],
        t2=[['sub_id', 'sessions']],
        flair=[['sub_id', 'sessions']],
        reference=[['sub_id', 'ref_ct']],
        regT12CT=[['sub_id', 'sessions']],
        ct1_preproc=[['sub_id', 'sessions']],
        t1_preproc=[['sub_id', 'sessions']],
        t2_preproc=[['sub_id', 'sessions']],
        flair_preproc=[['sub_id', 'sessions']])
    datasource.inputs.sub_id = sub_id
    datasource.inputs.sessions = sessions
    datasource.inputs.ref_ct = 'REF'

    return datasource, sessions, reference
Example #10
    def workflow(self):

        datasource = self.data_source
        dict_sequences = self.dict_sequences
        nipype_cache = self.nipype_cache
        result_dir = self.result_dir
        sub_id = self.sub_id

        toseg = {**dict_sequences['OT']}
        workflow = nipype.Workflow('lung_segmentation_workflow',
                                   base_dir=nipype_cache)
        datasink = nipype.Node(nipype.DataSink(base_directory=result_dir),
                               "datasink")
        substitutions = [('subid', sub_id)]
        substitutions += [('results/', '{}/'.format(self.workflow_name))]
        substitutions += [('_preproc_corrected.', '_preproc.')]
        datasink.inputs.substitutions = substitutions

        for key in toseg:
            files = []
            #             if tobet[key]['ref'] is not None:
            #                 files.append(tobet[key]['ref'])
            if toseg[key]['scans'] is not None:
                files = files + toseg[key]['scans']
            for el in files:
                el = el.strip(self.extention)
                node_name = '{0}_{1}'.format(key, el)
                preproc = nipype.Node(interface=LungSegmentationPreproc(),
                                      name='{}_ls_preproc'.format(node_name))
                preproc.inputs.new_spacing = self.new_spacing
                lung_seg = nipype.Node(interface=LungSegmentationInference(),
                                       name='{}_ls'.format(node_name))
                lung_seg.inputs.weights = self.network_weights

                workflow.connect(datasource, node_name, preproc, 'in_file')
                workflow.connect(preproc, 'tensor', lung_seg, 'tensor')
                workflow.connect(preproc, 'image_info', lung_seg, 'image_info')
                workflow.connect(
                    lung_seg, 'segmented_lungs', datasink,
                    'results.subid.{0}.@{1}_segmented_lungs'.format(key, el))

        return workflow
Example #11
File: rt.py  Project: TRO-HIT/PyCURT
    def workflow(self):

        self.datasource()
        datasource = self.data_source
        nipype_cache = self.nipype_cache
        result_dir = self.result_dir
        sub_id = self.sub_id
        regex = self.regex
        roi_selection = self.roi_selection
        if datasource is not None:

            workflow = nipype.Workflow('rtstruct_extraction_workflow', base_dir=nipype_cache)
        
            datasink = nipype.Node(nipype.DataSink(base_directory=result_dir), "datasink")
            substitutions = [('subid', sub_id)]
            substitutions += [('results/', '{}/'.format(self.workflow_name))]
    
            ss_convert = nipype.MapNode(interface=RTStructureCoverter(),
                                       iterfield=['reference_ct', 'input_ss'],
                                       name='ss_convert')
            mha_convert = nipype.MapNode(interface=MHA2NIIConverter(),
                                         iterfield=['input_folder'],
                                         name='mha_convert')
            
            if roi_selection:
                select = nipype.MapNode(interface=CheckRTStructures(),
                                        iterfield=['rois', 'dose_file'],
                                        name='select_gtv')
                workflow.connect(mha_convert, 'out_files', select, 'rois')
                workflow.connect(datasource, 'rt_dose', select, 'dose_file')
                workflow.connect(select, 'checked_roi', datasink,
                                 'results.subid.@masks')
            else:
                workflow.connect(mha_convert, 'out_files', datasink,
                                 'results.subid.@masks')

            for i, session in enumerate(self.rt['session']):
                substitutions += [('_select_gtv{}/'.format(i), session + '/')]
                substitutions += [('_voxelizer{}/'.format(i), session + '/')]
                substitutions += [('_mha_convert{}/'.format(i), session + '/')]

            datasink.inputs.substitutions = substitutions
        
            workflow.connect(datasource, 'rtct_nifti', ss_convert, 'reference_ct')
            workflow.connect(datasource, 'rts_dcm', ss_convert, 'input_ss')
            workflow.connect(ss_convert, 'out_structures', mha_convert, 'input_folder')
    
            workflow = self.datasink(workflow, datasink)
        else:
            workflow = nipype.Workflow('rtstruct_extraction_workflow', base_dir=nipype_cache)

        return workflow
Example #12
    def create_datasource(self):

        datasource = nipype.Node(interface=nipype.DataGrabber(
            infields=['sub_id'], outfields=self.outfields),
                                 name='datasource')
        datasource.inputs.base_directory = self.input_dir
        datasource.inputs.template = '*'
        datasource.inputs.sort_filelist = True
        datasource.inputs.raise_on_empty = False
        datasource.inputs.field_template = self.field_template
        datasource.inputs.template_args = self.template_args
        datasource.inputs.sub_id = self.sub_id

        return datasource
Example #13
    def datasink(self, workflow, workflow_datasink):

        datasource = self.data_source
        sequences1 = [
            x for x in datasource.inputs.field_template.keys()
            if x not in ('t1_0', 'reference', 'rt', 'rt_dose', 'doses',
                         'rts_dcm', 'rtstruct', 'physical', 'rbe', 'rtct',
                         'rtct_nifti')
        ]
        rt = [x for x in datasource.inputs.field_template.keys() if x == 'rt']

        split_ds_nodes = []
        for i in range(len(sequences1)):
            sessions_wit_seq = [
                x for y in self.sessions for x in glob.glob(
                    os.path.join(self.base_dir, self.sub_id, y,
                                 sequences1[i].upper() + '.nii.gz'))
            ]
            split_ds = nipype.Node(interface=Split(),
                                   name='split_ds{}'.format(i))
            split_ds.inputs.splits = [1] * len(sessions_wit_seq)
            split_ds_nodes.append(split_ds)

            if len(sessions_wit_seq) > 1:
                workflow.connect(datasource, sequences1[i], split_ds, 'inlist')
                for j, sess in enumerate(sessions_wit_seq):
                    sess_name = sess.split('/')[-2]
                    workflow.connect(
                        split_ds, 'out{}'.format(j + 1), workflow_datasink,
                        'results.subid.{0}.@{1}'.format(
                            sess_name, sequences1[i]))
            elif len(sessions_wit_seq) == 1:
                workflow.connect(
                    datasource, sequences1[i], workflow_datasink,
                    'results.subid.{0}.@{1}'.format(
                        sessions_wit_seq[0].split('/')[-2], sequences1[i]))
        if self.reference:
            workflow.connect(datasource, 'reference', workflow_datasink,
                             'results.subid.REF.@ref_ct')
        if self.t10:
            workflow.connect(datasource, 't1_0', workflow_datasink,
                             'results.subid.T10.@ref_t1')
        if rt:
            workflow.connect(datasource, 'rt', workflow_datasink,
                             'results.subid.@rt')
        return workflow
Example #14
    def create_datasource(self):

        datasource = nipype.Node(interface=nipype.DataGrabber(
            infields=['sub_id', 'sessions', 'ref_ct', 'ref_t1'],
            outfields=self.outfields),
                                 name='datasource')
        datasource.inputs.base_directory = self.base_dir
        datasource.inputs.template = '*'
        datasource.inputs.sort_filelist = True
        datasource.inputs.raise_on_empty = False
        datasource.inputs.field_template = self.field_template
        datasource.inputs.template_args = self.template_args
        datasource.inputs.sub_id = self.sub_id
        datasource.inputs.sessions = self.sessions
        datasource.inputs.ref_ct = 'REF'
        datasource.inputs.ref_t1 = 'T10'
        if self.rt is not None:
            datasource.inputs.rt = self.rt['session']
        if self.ct_sessions:
            datasource.inputs.ct_session = self.ct_sessions

        return datasource
Example #15
                else:
                    previous = [x for x in data if sub_name in x][0]
                    previous_tp = previous.split('/')[-1]
                    if current_tp > previous_tp:
                        data.remove(previous)
                        data.append(os.path.join(sub_name, current_tp))
    return data


base_dir = '/mnt/sdb/anal_sorted/'
cache_dir = '/mnt/sdb/feat_cache'
result_dir = '/mnt/sdb/anal_fe'
sub_list = creste_sub_list(base_dir)

datasource = nipype.Node(interface=nipype.DataGrabber(
    infields=['sub_id'], outfields=['ct', 'rtstruct']),
                         name='datasource')
datasource.inputs.base_directory = base_dir
datasource.inputs.template = '*'
datasource.inputs.sort_filelist = True
datasource.inputs.field_template = dict(ct='%s/CT.nii.gz',
                                        rtstruct='%s/RTSTRUCT/*.dcm')
datasource.inputs.template_args = dict(ct=[['sub_id']], rtstruct=[['sub_id']])
datasource.inputs.raise_on_empty = False
datasource.inputs.sub_id = sub_list

voxelizer = nipype.MapNode(interface=Voxelizer(),
                           iterfield=['reference', 'struct_file'],
                           name='voxelizer')
voxelizer.inputs.regular_expression = '.*PTV.*'
voxelizer.inputs.multi_structs = True
Example #16
cache_dir = '/mnt/sdb/nipype_reg_cache'
result_dir = '/mnt/sdb/Cinderella_FU_reg_all'
sub_list = [x for x in sorted(os.listdir(base_dir))
            if os.path.isdir(os.path.join(base_dir,x))
            and glob.glob(os.path.join(base_dir,x,'*/CT.nii.gz'))]

for sub in sub_list:
    for contrast in contrasts:
        sessions = [x.split('/')[-2] for x in sorted(glob.glob(os.path.join(
            base_dir, sub, '*', '{}.nii.gz'.format(contrast))))]
        ref_tp = [x.split('/')[-2] for x in sorted(glob.glob(os.path.join(
            base_dir, sub, '*', 'CT.nii.gz')))]
        if sessions and ref_tp:
            ref_tp = ref_tp[-1]
            datasource = nipype.Node(
                interface=nipype.DataGrabber(infields=['contrasts', 'sub_id', 'sessions', 'ref_tp'],
                                             outfields=['reference', 'to_reg']), name='datasource')
            datasource.inputs.base_directory = base_dir
            datasource.inputs.template = '*'
            datasource.inputs.sort_filelist = True
            datasource.inputs.field_template = dict(reference='%s/%s/%sCT.nii.gz',
                                                    to_reg='%s/%s/%s.nii.gz')
            datasource.inputs.template_args = dict(to_reg=[['sub_id', 'sessions', 'contrasts']],
                                                   reference=[['sub_id', 'ref_tp', '']])
            datasource.inputs.raise_on_empty = False
            datasource.inputs.contrasts = contrast
            datasource.inputs.sub_id = sub
            datasource.inputs.sessions = sessions
            datasource.inputs.ref_tp = ref_tp

            reg = nipype.MapNode(interface=AntsRegSyn(), iterfield=['input_file'], name='ants_reg')
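AntsRegSyn appears to be a project-specific wrapper around ANTs' antsRegistrationSyN script (an assumption based on the name); nipype ships a comparable stock interface. A hedged sketch of the equivalent call, assuming 'r' maps to a rigid transform as in the snippets above (paths are placeholders; ANTs must be on PATH to actually run):

from nipype.interfaces.ants import RegistrationSynQuick

reg = RegistrationSynQuick()
reg.inputs.fixed_image = 'REF/CT.nii.gz'    # placeholder path
reg.inputs.moving_image = 'T1.nii.gz'       # placeholder path
reg.inputs.transform_type = 'r'             # rigid, mirroring the wrapper
reg.inputs.num_threads = 4
# res = reg.run()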
Example #17
    def convertion_workflow(self):

        self.datasource()

        datasource = self.data_source
        dict_sequences = self.dict_sequences
        nipype_cache = self.nipype_cache
        result_dir = self.result_dir
        sub_id = self.sub_id

        toprocess = {**dict_sequences['MR-RT'], **dict_sequences['OT']}
        workflow = nipype.Workflow('data_convertion_workflow',
                                   base_dir=nipype_cache)
        datasink = nipype.Node(nipype.DataSink(base_directory=result_dir),
                               "datasink")
        substitutions = [('subid', sub_id)]
        substitutions += [('results/', '{}/'.format(self.workflow_name))]
        substitutions += [('checked_dicoms', 'RTSTRUCT_used')]
        datasink.inputs.substitutions = substitutions

        for key in toprocess:
            files = []
            if toprocess[key]['scans'] is not None:
                files = files + toprocess[key]['scans']
            for el in files:
                el = el.strip(self.extention)
                node_name = '{0}_{1}'.format(key, el)
                dc = nipype.Node(interface=DicomCheck(),
                                 name='{}_dc'.format(node_name))
                workflow.connect(datasource, node_name, dc, 'dicom_dir')
                converter = nipype.Node(interface=Dcm2niix(),
                                        name='{}_convert'.format(node_name))
                converter.inputs.compress = 'y'
                converter.inputs.philips_float = False
                if el == 'CT':
                    converter.inputs.merge_imgs = True
                else:
                    converter.inputs.merge_imgs = False
                check = nipype.Node(interface=ConversionCheck(),
                                    name='{}_cc'.format(node_name))
                workflow.connect(dc, 'outdir', converter, 'source_dir')
                workflow.connect(dc, 'scan_name', converter, 'out_filename')
                workflow.connect(dc, 'scan_name', check, 'file_name')
                workflow.connect(converter, 'converted_files', check,
                                 'in_file')
                workflow.connect(
                    check, 'out_file', datasink,
                    'results.subid.{0}.@{1}_converted'.format(key, el))

        for key in dict_sequences['RT']:
            doses = []
            if dict_sequences['RT'][key]['phy_dose'] is not None:
                doses.append('{}_phy_dose'.format(key))
            if dict_sequences['RT'][key]['rbe_dose'] is not None:
                doses.append('{}_rbe_dose'.format(key))
            for el in doses:
                el = el.strip(self.extention)
                node_name = el.strip(self.extention)
                converter = nipype.Node(interface=DoseConverter(),
                                        name='{}_dose_conv'.format(node_name))
                dc = nipype.Node(interface=DicomCheck(),
                                 name='{}_dc'.format(node_name))
                workflow.connect(datasource, node_name, dc, 'dicom_dir')
                workflow.connect(dc, 'dose_file', converter, 'input_dose')
                workflow.connect(dc, 'scan_name', converter, 'out_name')
                workflow.connect(
                    converter, 'out_file', datasink,
                    'results.subid.{0}.@{1}_converted'.format(key, el))
            if dict_sequences['RT'][key]['ot_dose'] is not None:
                el = '{}_ot_dose'.format(key)
                node_name = el.strip(self.extention)
                converter = nipype.Node(interface=DoseConverter(),
                                        name='{}_convert'.format(node_name))
                get_dose = nipype.Node(interface=GetRefRTDose(),
                                       name='{}_get_dose'.format(node_name))
                workflow.connect(datasource, node_name, get_dose, 'doses')
                workflow.connect(get_dose, 'dose_file', converter,
                                 'input_dose')
                converter.inputs.out_name = 'Unused_RTDOSE.nii.gz'
                workflow.connect(
                    converter, 'out_file', datasink,
                    'results.subid.{0}.@{1}_converted'.format(key, el))
            if dict_sequences['RT'][key]['rtct'] is not None:
                el = '{}_rtct'.format(key)
                node_name = el.strip(self.extention)
                converter = nipype.Node(interface=Dcm2niix(),
                                        name='{}_convert'.format(node_name))
                converter.inputs.compress = 'y'
                converter.inputs.philips_float = False
                converter.inputs.merge_imgs = True
                dc = nipype.Node(interface=DicomCheck(),
                                 name='{}_dc'.format(node_name))
                workflow.connect(datasource, node_name, dc, 'dicom_dir')
                check = nipype.Node(interface=ConversionCheck(),
                                    name='{}_cc'.format(node_name))
                workflow.connect(dc, 'outdir', converter, 'source_dir')
                workflow.connect(dc, 'scan_name', converter, 'out_filename')
                workflow.connect(dc, 'scan_name', check, 'file_name')
                workflow.connect(converter, 'converted_files', check,
                                 'in_file')
                workflow.connect(
                    check, 'out_file', datasink,
                    'results.subid.{0}.@{1}_converted'.format(key, el))
            if dict_sequences['RT'][key]['rtstruct'] is not None:
                el = '{}_rtstruct'.format(key)
                node_name = el.strip(self.extention)
                dc = nipype.Node(interface=DicomCheck(),
                                 name='{}_dc'.format(node_name))
                workflow.connect(datasource, node_name, dc, 'dicom_dir')
                workflow.connect(dc, 'outdir', datasink,
                                 'results.subid.{0}.@rtstruct'.format(key))

        return workflow
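A standalone sketch of the Dcm2niix settings used above: merge_imgs (dcm2niix's -m flag) merges 2D slices from the same series, which the workflow enables for CT only. Directory paths are placeholders and dcm2niix must be installed; trait types can vary slightly across nipype versions:

from nipype.interfaces.dcm2nii import Dcm2niix

conv = Dcm2niix()
conv.inputs.source_dir = '/data/sub001/CT_dicoms'   # placeholder
conv.inputs.out_filename = 'CT'
conv.inputs.compress = 'y'
conv.inputs.philips_float = False
conv.inputs.merge_imgs = True   # merge 2D slices, as for CT above
# res = conv.run()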
Example #18
    def workflow(self):

        datasource = self.data_source
        dict_sequences = self.dict_sequences
        nipype_cache = self.nipype_cache
        result_dir = self.result_dir
        sub_id = self.sub_id

        toreg = {**dict_sequences['MR-RT'], **dict_sequences['OT']}
        workflow = nipype.Workflow('registration_workflow',
                                   base_dir=nipype_cache)
        datasink = nipype.Node(nipype.DataSink(base_directory=result_dir),
                               "datasink")
        substitutions = [('subid', sub_id)]
        substitutions += [('results/', '{}/'.format(self.workflow_name))]

        mr_rt_ref = None
        rtct = None

        if dict_sequences['MR-RT'] and self.normilize_mr_rt:
            ref_session = list(dict_sequences['MR-RT'].keys())[0]
            ref_scans = dict_sequences['MR-RT'][ref_session]['scans']
            for pr in POSSIBLE_REF:
                for scan in ref_scans:
                    if pr in scan.split('_')[0]:
                        mr_rt_ref = '{0}_{1}_preproc'.format(
                            ref_session,
                            scan.split('_')[0])
                        mr_rt_ref_name = '{}_preproc'.format(
                            scan.split('_')[0])
                        break
                else:
                    continue
                break
        if dict_sequences['RT'] and self.normilize_rtct:
            rt_session = list(dict_sequences['RT'].keys())[0]
            ct_name = dict_sequences['RT'][rt_session]['rtct']
            if ct_name is not None and mr_rt_ref is not None:
                rtct = '{0}_rtct'.format(rt_session)
                reg_mr2ct = nipype.Node(interface=AntsRegSyn(),
                                        name='{}_lin_reg'.format(rt_session))
                reg_mr2ct.inputs.transformation = 'r'
                reg_mr2ct.inputs.num_dimensions = 3
                reg_mr2ct.inputs.num_threads = 4
                reg_mr2ct.inputs.out_prefix = '{}_reg2RTCT'.format(
                    mr_rt_ref_name)
                reg_mr2ct.inputs.interpolation = 'BSpline'
                workflow.connect(datasource, mr_rt_ref, reg_mr2ct,
                                 'input_file')
                workflow.connect(datasource, rtct, reg_mr2ct, 'ref_file')
                workflow.connect(
                    reg_mr2ct, 'regmat', datasink,
                    'results.subid.{0}.@{1}_reg2RTCT_mat'.format(
                        ref_session, mr_rt_ref_name))
                workflow.connect(
                    reg_mr2ct, 'reg_file', datasink,
                    'results.subid.{0}.@{1}_reg2RTCT'.format(
                        ref_session, mr_rt_ref_name))
                substitutions += [
                    ('{}_reg2RTCTWarped.nii.gz'.format(mr_rt_ref_name),
                     '{}_reg2RTCT.nii.gz'.format(mr_rt_ref_name))
                ]
                substitutions += [
                    ('{}_reg2RTCT0GenericAffine.mat'.format(mr_rt_ref_name),
                     '{}_reg2RTCT_linear_mat.mat'.format(mr_rt_ref_name))
                ]

        for key in toreg:
            session = toreg[key]
            if session['scans'] is not None:
                scans = session['scans']
                scans = [x for x in scans if 'mask' not in x]
                ref = None
                for pr in POSSIBLE_REF:
                    for scan in scans:
                        if pr in scan:
                            ref = '{0}_{1}_preproc'.format(
                                key,
                                scan.split('_')[0])
                            scans.remove('{}_preproc'.format(
                                scan.split('_')[0]))
                            ref_name = scan.split('_')[0]
                            workflow.connect(
                                datasource, ref, datasink,
                                'results.subid.{0}.@{1}_reg'.format(
                                    key, ref_name))
                            substitutions += [
                                ('{}_preproc'.format(scan.split('_')[0]),
                                 '{}_reg'.format(scan.split('_')[0]))
                            ]
                            break
                    else:
                        continue
                    break
                if ref is not None:
                    if mr_rt_ref is not None and key != ref_session:
                        reg_mr_rt = nipype.Node(interface=AntsRegSyn(),
                                                name='{}_def_reg'.format(key))
                        reg_mr_rt.inputs.transformation = 's'
                        reg_mr_rt.inputs.num_dimensions = 3
                        reg_mr_rt.inputs.num_threads = 6
                        reg_mr_rt.inputs.out_prefix = '{}_reg2MR_RT'.format(
                            ref_name)
                        workflow.connect(datasource, ref, reg_mr_rt,
                                         'input_file')
                        workflow.connect(datasource, mr_rt_ref, reg_mr_rt,
                                         'ref_file')
                        workflow.connect(
                            reg_mr_rt, 'regmat', datasink,
                            'results.subid.{0}.@{1}_reg2MR_RT_linear_mat'.
                            format(key, ref_name))
                        workflow.connect(
                            reg_mr_rt, 'reg_file', datasink,
                            'results.subid.{0}.@{1}_reg2MR_RT'.format(
                                key, ref_name))
                        workflow.connect(
                            reg_mr_rt, 'warp_file', datasink,
                            'results.subid.{0}.@{1}_reg2MR_RT_warp'.format(
                                key, ref_name))
                        substitutions += [
                            ('{}_reg2MR_RT0GenericAffine.mat'.format(ref_name),
                             '{}_reg2MR_RT_linear_mat.mat'.format(ref_name))
                        ]
                        substitutions += [
                            ('{}_reg2MR_RT1Warp.nii.gz'.format(ref_name),
                             '{}_reg2MR_RT_warp.nii.gz'.format(ref_name))
                        ]
                        substitutions += [
                            ('{}_reg2MR_RTWarped.nii.gz'.format(ref_name),
                             '{}_reg2MR_RT.nii.gz'.format(ref_name))
                        ]
                    if rtct is not None and key != ref_session:
                        apply_ts_rt_ref = nipype.Node(
                            interface=ApplyTransforms(),
                            name='{}_norm2RT'.format(ref_name))
                        apply_ts_rt_ref.inputs.output_image = (
                            '{}_reg2RTCT.nii.gz'.format(ref_name))
                        workflow.connect(datasource, ref, apply_ts_rt_ref,
                                         'input_image')
                        workflow.connect(datasource, rtct, apply_ts_rt_ref,
                                         'reference_image')
                        workflow.connect(
                            apply_ts_rt_ref, 'output_image', datasink,
                            'results.subid.{0}.@{1}_reg2RTCT'.format(
                                key, ref_name))
                        merge_rt_ref = nipype.Node(
                            interface=Merge(4),
                            name='{}_merge_rt'.format(ref_name))
                        merge_rt_ref.inputs.ravel_inputs = True
                        workflow.connect(reg_mr2ct, 'regmat', merge_rt_ref,
                                         'in1')
                        workflow.connect(reg_mr_rt, 'regmat', merge_rt_ref,
                                         'in3')
                        workflow.connect(reg_mr_rt, 'warp_file', merge_rt_ref,
                                         'in2')
                        workflow.connect(merge_rt_ref, 'out', apply_ts_rt_ref,
                                         'transforms')

                    for el in scans:
                        el = el.strip(self.extention)
                        el_name = el.split('_')[0]
                        node_name = '{0}_{1}'.format(key, el)
                        reg = nipype.Node(interface=AntsRegSyn(),
                                          name='{}_lin_reg'.format(node_name))
                        reg.inputs.transformation = 'r'
                        reg.inputs.num_dimensions = 3
                        reg.inputs.num_threads = 4
                        reg.inputs.interpolation = 'BSpline'
                        reg.inputs.out_prefix = '{}_reg'.format(el_name)
                        workflow.connect(datasource, node_name, reg,
                                         'input_file')
                        workflow.connect(datasource, ref, reg, 'ref_file')
                        workflow.connect(
                            reg, 'reg_file', datasink,
                            'results.subid.{0}.@{1}_reg'.format(key, el_name))
                        workflow.connect(
                            reg, 'regmat', datasink,
                            'results.subid.{0}.@{1}_regmat'.format(
                                key, el_name))
                        substitutions += [
                            ('{}_regWarped.nii.gz'.format(el_name),
                             '{}_reg.nii.gz'.format(el_name))
                        ]
                        substitutions += [
                            ('{}_reg0GenericAffine.mat'.format(el_name),
                             '{}_linear_regmat.mat'.format(el_name))
                        ]
                        if mr_rt_ref is not None and key != ref_session:
                            merge = nipype.Node(
                                interface=Merge(3),
                                name='{}_merge_MR_RT'.format(node_name))
                            merge.inputs.ravel_inputs = True
                            workflow.connect(reg, 'regmat', merge, 'in3')
                            workflow.connect(reg_mr_rt, 'regmat', merge, 'in2')
                            workflow.connect(reg_mr_rt, 'warp_file', merge,
                                             'in1')
                            apply_ts = nipype.Node(
                                interface=ApplyTransforms(),
                                name='{}_norm2MR_RT'.format(node_name))
                            apply_ts.inputs.output_image = '{}_reg2MR_RT.nii.gz'.format(
                                el_name)
                            workflow.connect(merge, 'out', apply_ts,
                                             'transforms')
                            workflow.connect(datasource, node_name, apply_ts,
                                             'input_image')
                            workflow.connect(datasource, mr_rt_ref, apply_ts,
                                             'reference_image')
                            workflow.connect(
                                apply_ts, 'output_image', datasink,
                                'results.subid.{0}.@{1}_reg2MR_RT'.format(
                                    key, el_name))
                        if rtct is not None:
                            apply_ts_rt = nipype.Node(
                                interface=ApplyTransforms(),
                                name='{}_norm2RT'.format(node_name))
                            apply_ts_rt.inputs.output_image = '{}_reg2RTCT.nii.gz'.format(
                                el_name)
                            workflow.connect(datasource, node_name,
                                             apply_ts_rt, 'input_image')
                            workflow.connect(datasource, rtct, apply_ts_rt,
                                             'reference_image')
                            workflow.connect(
                                apply_ts_rt, 'output_image', datasink,
                                'results.subid.{0}.@{1}_reg2RTCT'.format(
                                    key, el_name))
                            if key != ref_session:
                                merge_rt = nipype.Node(
                                    interface=Merge(4),
                                    name='{}_merge_rt'.format(node_name))
                                merge_rt.inputs.ravel_inputs = True
                                workflow.connect(reg_mr2ct, 'regmat', merge_rt,
                                                 'in1')
                                workflow.connect(reg, 'regmat', merge_rt,
                                                 'in4')
                                workflow.connect(reg_mr_rt, 'regmat', merge_rt,
                                                 'in3')
                                workflow.connect(reg_mr_rt, 'warp_file',
                                                 merge_rt, 'in2')
                                workflow.connect(merge_rt, 'out', apply_ts_rt,
                                                 'transforms')
                            else:
                                merge_rt = nipype.Node(
                                    interface=Merge(2),
                                    name='{}_merge_rt'.format(node_name))
                                merge_rt.inputs.ravel_inputs = True
                                workflow.connect(reg_mr2ct, 'regmat', merge_rt,
                                                 'in1')
                                workflow.connect(reg, 'regmat', merge_rt,
                                                 'in2')
                                workflow.connect(merge_rt, 'out', apply_ts_rt,
                                                 'transforms')

        datasink.inputs.substitutions = substitutions

        return workflow
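The Merge node ordering above is deliberate: antsApplyTransforms applies the transform list in reverse, so the last entry touches the moving image first. For a scan in a follow-up session that means the scan-to-session-T1 affine is applied first, then the T1-to-MR-RT warp and affine, then the MR-RT-to-RTCT affine. A hedged standalone sketch with placeholder file names (ANTs required to actually run):

from nipype.interfaces.ants import ApplyTransforms

at = ApplyTransforms()
at.inputs.input_image = 'FLAIR.nii.gz'            # placeholder
at.inputs.reference_image = 'RTCT.nii.gz'         # placeholder
at.inputs.output_image = 'FLAIR_reg2RTCT.nii.gz'
at.inputs.transforms = ['mr2ct_affine.mat',       # applied last
                        'mrrt_warp.nii.gz',
                        'mrrt_affine.mat',
                        'scan2ref_affine.mat']    # applied first
# res = at.run()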
Example #19
    def sorting_workflow(self,
                         bp_class_mr_cp,
                         mrclass_cp,
                         mrclass_sub_cp,
                         bp_class_ct_cp,
                         bp=['hnc', 'hncKM'],
                         subject_name_position=-3,
                         renaming=False,
                         mrrt_max_time_diff=15,
                         rert_max_time=42,
                         bp_class_ct_th=0.33,
                         bp_class_mr_th=0.5,
                         mr_classification=True):

        mrclass_bp = [x for x in bp if x in ['hnc', 'abd-pel']]
        if mr_classification and not mrclass_bp:
            print('MRClass will not run')
            mr_classification = False
        folder2merge = 4
        folder2merge_iterfields = ['in1', 'in2', 'in3', 'in4']
        #         else:
        #             mr_classiffication = True
        #             folder2merge = 3
        #             folder2merge_iterfields = ['in1', 'in2', 'in3']

        nipype_cache = os.path.join(self.nipype_cache, 'data_sorting')
        result_dir = self.result_dir

        workflow = nipype.Workflow('sorting_workflow', base_dir=nipype_cache)
        datasink = nipype.Node(interface=SinkSorting(), name='datasink')
        datasink.inputs.out_folder = result_dir

        file_check = nipype.Node(interface=FileCheck(), name='fc')
        file_check.inputs.input_dir = self.input_dir
        file_check.inputs.subject_name_position = subject_name_position
        file_check.inputs.renaming = renaming
        prep = nipype.MapNode(interface=FolderPreparation(),
                              name='prep',
                              iterfield=['input_list'])
        bp_class_ct = nipype.MapNode(interface=ImageClassification(),
                                     name='bpclass_ct',
                                     iterfield=['images2label'])
        bp_class_ct.inputs.checkpoints = bp_class_ct_cp
        bp_class_ct.inputs.body_part = bp
        bp_class_ct.inputs.network = 'bpclass'
        bp_class_ct.inputs.modality = 'CT'
        bp_class_ct.inputs.probability_th = bp_class_ct_th
        mr_rt_merge = nipype.MapNode(interface=Merge(folder2merge),
                                     name='mr_rt_merge',
                                     iterfield=folder2merge_iterfields)
        mr_rt_merge.inputs.ravel_inputs = True
        merging = nipype.Node(interface=FolderMerge(), name='merge')
        merging.inputs.mrrt_max_time_diff = mrrt_max_time_diff
        merging.inputs.rert_max_time = rert_max_time
        if mr_classification:
            if mrclass_cp is None or mrclass_sub_cp is None:
                raise Exception('MRClass weights were not provided, MR image '
                                'classification cannot be performed!')
            mrclass = nipype.MapNode(interface=ImageClassification(),
                                     name='mrclass',
                                     iterfield=['images2label'])
            mrclass.inputs.checkpoints = mrclass_cp
            mrclass.inputs.sub_checkpoints = mrclass_sub_cp
            mrclass.inputs.body_part = mrclass_bp
            mrclass.inputs.network = 'mrclass'
            mrclass.inputs.modality = 'MR'

        bp_class_mr = nipype.MapNode(interface=ImageClassification(),
                                     name='bpclass_mr',
                                     iterfield=['images2label'])
        bp_class_mr.inputs.checkpoints = bp_class_mr_cp
        bp_class_mr.inputs.body_part = mrclass_bp
        bp_class_mr.inputs.network = 'bpclass'
        bp_class_mr.inputs.modality = 'MR'
        bp_class_mr.inputs.probability_th = bp_class_mr_th
        #         else:
        #             mr_rt_merge.inputs.in3 = 'None'
        rt_sorting = nipype.MapNode(interface=RTDataSorting(),
                                    name='rt_sorting',
                                    iterfield=['input_dir'])

        pet_sorting = nipype.MapNode(interface=PETDataSorting(),
                                     name='pet_sorting',
                                     iterfield=['input_dir'])

        workflow.connect(file_check, 'out_list', prep, 'input_list')
        workflow.connect(prep, 'out_folder', rt_sorting, 'input_dir')
        workflow.connect(prep, 'out_folder', pet_sorting, 'input_dir')
        workflow.connect(prep, 'for_inference_ct', bp_class_ct, 'images2label')
        workflow.connect(prep, 'for_inference_mr', bp_class_mr, 'images2label')
        workflow.connect(bp_class_ct, 'output_dict', mr_rt_merge, 'in1')
        workflow.connect(rt_sorting, 'output_dict', mr_rt_merge, 'in2')
        workflow.connect(pet_sorting, 'output_dict', mr_rt_merge, 'in4')
        workflow.connect(mr_rt_merge, 'out', merging, 'input_list')
        workflow.connect(merging, 'out_folder', datasink, 'tosink')
        if mr_classification:
            workflow.connect(bp_class_mr, 'labeled_images', mrclass,
                             'images2label')
            workflow.connect(mrclass, 'output_dict', mr_rt_merge, 'in3')
        else:
            workflow.connect(bp_class_mr, 'output_dict', mr_rt_merge, 'in3')

        return workflow
Example #20
def single_tp_registration(sub_id,
                           datasource,
                           session,
                           reference,
                           result_dir,
                           nipype_cache,
                           bet_workflow=None):
    """
    This is a workflow to register multi-modalities MR (T2, T1KM, FLAIR) to their 
    reference T1 image, in one single time-point cohort. In particular, for each 
    subject, this workflow will register the MR images in the provided time-point (tp)
    to the corresponding T1, then it will register the T1 image to the BPLCT (if present)'
    '. At the end, all the MR images will be saved both in T1 space and in CT space.
    """
    session = session[0]
    if reference:
        regT12CT = nipype.MapNode(interface=AntsRegSyn(),
                                  iterfield=['input_file'],
                                  name='regT12CT')
        regT12CT.inputs.transformation = 'r'
        regT12CT.inputs.num_dimensions = 3
        regT12CT.inputs.num_threads = 4

    reg_nodes = []
    for i in range(3):
        reg = nipype.MapNode(interface=AntsRegSyn(),
                             iterfield=['input_file', 'ref_file'],
                             name='ants_reg{}'.format(i))
        reg.inputs.transformation = 'r'
        reg.inputs.num_dimensions = 3
        reg.inputs.num_threads = 4
        reg.inputs.interpolation = 'BSpline'
        reg_nodes.append(reg)

    apply_mask_nodes = []
    for i in range(3):
        masking = nipype.MapNode(interface=ApplyMask(),
                                 iterfield=['in_file', 'mask_file'],
                                 name='masking{}'.format(i))
        apply_mask_nodes.append(masking)

    if reference:
        apply_ts_nodes = []
        for i in range(3):
            apply_ts = nipype.MapNode(interface=ApplyTransforms(),
                                      iterfield=['input_image', 'transforms'],
                                      name='apply_ts{}'.format(i))
            apply_ts_nodes.append(apply_ts)

        apply_ts_t1 = nipype.MapNode(interface=ApplyTransforms(),
                                     iterfield=['input_image', 'transforms'],
                                     name='apply_ts_t1')

        merge_nodes = []
        for i in range(3):
            merge = nipype.MapNode(interface=Merge(2),
                                   iterfield=['in1', 'in2'],
                                   name='merge{}'.format(i))
            merge.inputs.ravel_inputs = True
            merge_nodes.append(merge)

    datasink = nipype.Node(nipype.DataSink(base_directory=result_dir),
                           "datasink")

    substitutions = [('subid', sub_id)]
    substitutions += [('session', session)]
    substitutions += [('_regT12CT0/antsreg0GenericAffine.mat',
                       '/reg2T1_ref.mat')]
    substitutions += [('_masking00/antsregWarped_masked.nii.gz',
                       session + '/' + 'CT1_preproc.nii.gz')]
    substitutions += [('_regT12CT/antsreg0GenericAffine.mat',
                       '/regT1_ref2CT.mat')]
    substitutions += [('_masking10/antsregWarped_masked.nii.gz',
                       session + '/' + 'T2_preproc.nii.gz')]
    substitutions += [('_masking20/antsregWarped_masked.nii.gz',
                       session + '/' + 'FLAIR_preproc.nii.gz')]
    substitutions += [('_apply_ts00/antsregWarped_masked_trans.nii.gz',
                       session + '/' + 'CT1_reg2CT.nii.gz')]
    substitutions += [('_apply_ts10/antsregWarped_masked_trans.nii.gz',
                       session + '/' + 'T2_reg2CT.nii.gz')]
    substitutions += [('_apply_ts20/antsregWarped_masked_trans.nii.gz',
                       session + '/' + 'FLAIR_reg2CT.nii.gz')]
    substitutions += [('_apply_ts_t10/T1_preproc_trans.nii.gz',
                       session + '/' + 'T1_reg2CT.nii.gz')]

    datasink.inputs.substitutions = substitutions
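    # Note on DataSink substitutions (standard nipype behaviour): each
    # (src, dst) pair is applied, in list order, as a plain substring
    # replacement on every output path, so e.g.
    # 'results/subid/_masking00/antsregWarped_masked.nii.gz' is stored as
    # 'results/<sub_id>/<session>/CT1_preproc.nii.gz'.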
    # Create Workflow
    workflow = nipype.Workflow('registration_workflow', base_dir=nipype_cache)

    for i, reg in enumerate(reg_nodes):
        workflow.connect(datasource, SEQUENCES[i + 1], reg, 'input_file')
        workflow.connect(datasource, SEQUENCES[0], reg, 'ref_file')
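    # SEQUENCES is assumed to be a module-level constant such as
    # SEQUENCES = ['t1', 'ct1', 't2', 'flair'] (cf. Example #29 below), so
    # SEQUENCES[0] is the intra-session reference T1 and SEQUENCES[1:] are
    # the modalities registered onto it.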
    # bring every MR in CT space
    if reference:
        for i, node in enumerate(merge_nodes):
            workflow.connect(reg_nodes[i], 'regmat', node, 'in2')
            workflow.connect(regT12CT, 'regmat', node, 'in1')
        for i, node in enumerate(apply_ts_nodes):
            workflow.connect(apply_mask_nodes[i], 'out_file', node,
                             'input_image')
            workflow.connect(datasource, 'reference', node, 'reference_image')
            workflow.connect(regT12CT, 'regmat', node, 'transforms')
            workflow.connect(
                node, 'output_image', datasink,
                'results.subid.@{}_reg2CT'.format(SEQUENCES[i + 1]))

        workflow.connect(regT12CT, 'regmat', datasink,
                         'results.subid.{0}.@regT12CT_mat'.format(session))
        workflow.connect(datasource, 'reference', regT12CT, 'ref_file')
        workflow.connect(datasource, 't1', regT12CT, 'input_file')

        if bet_workflow is not None:
            workflow.connect(bet_workflow, 'bet.out_file', apply_ts_t1,
                             'input_image')
        else:
            workflow.connect(datasource, 't1_bet', apply_ts_t1, 'input_image')
        workflow.connect(datasource, 'reference', apply_ts_t1,
                         'reference_image')
        workflow.connect(apply_ts_t1, 'output_image', datasink,
                         'results.subid.@T1_reg2CT')
        workflow.connect(regT12CT, 'regmat', apply_ts_t1, 'transforms')

    for i, mask in enumerate(apply_mask_nodes):
        workflow.connect(reg_nodes[i], 'reg_file', mask, 'in_file')
        if bet_workflow is not None:
            workflow.connect(bet_workflow, 'bet.out_mask', mask, 'mask_file')
        else:
            workflow.connect(datasource, 't1_mask', mask, 'mask_file')
        workflow.connect(mask, 'out_file', datasink,
                         'results.subid.@{}_preproc'.format(SEQUENCES[i + 1]))

    if bet_workflow is not None:
        workflow = datasink_base(datasink,
                                 datasource,
                                 workflow, [session],
                                 reference,
                                 t10=False)
    else:
        workflow = datasink_base(datasink,
                                 datasource,
                                 workflow, [session],
                                 reference,
                                 extra_nodes=['t1_bet'],
                                 t10=False)

    return workflow
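# A minimal, hypothetical driver for the function above. The DataGrabber
# fields mirror those the workflow reads from its datasource ('t1', 'ct1',
# 't2', 'flair', 't1_bet', 't1_mask', 'reference'); every path below is a
# placeholder, not part of the original package.
import nipype

datasource = nipype.Node(
    interface=nipype.DataGrabber(
        infields=['sub_id', 'sessions'],
        outfields=['t1', 'ct1', 't2', 'flair', 't1_bet', 't1_mask',
                   'reference']),
    name='datasource')
datasource.inputs.base_directory = '/data/sorted'
datasource.inputs.template = '*'
datasource.inputs.sort_filelist = True
datasource.inputs.raise_on_empty = False
datasource.inputs.field_template = dict(
    t1='%s/%s/T1.nii.gz', ct1='%s/%s/CT1.nii.gz', t2='%s/%s/T2.nii.gz',
    flair='%s/%s/FLAIR.nii.gz', t1_bet='%s/%s/T1_bet.nii.gz',
    t1_mask='%s/%s/T1_bet_mask.nii.gz', reference='%s/REF/CT.nii.gz')
datasource.inputs.template_args = dict(
    t1=[['sub_id', 'sessions']], ct1=[['sub_id', 'sessions']],
    t2=[['sub_id', 'sessions']], flair=[['sub_id', 'sessions']],
    t1_bet=[['sub_id', 'sessions']], t1_mask=[['sub_id', 'sessions']],
    reference=[['sub_id']])
datasource.inputs.sub_id = 'sub-001'
datasource.inputs.sessions = ['session-01']

reg_wf = single_tp_registration('sub-001', datasource, ['session-01'],
                                reference=True, result_dir='/data/results',
                                nipype_cache='/data/cache')
reg_wf.run(plugin='Linear')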
Example #21
for n, sub in enumerate(SUB_LIST):
    for contrast in CONTRASTS:
        sub_name = sub.split('/')[-1]
        sessions = [x for x in sorted(glob.glob(os.path.join(sub, '*')))]
        ct_tp = [i for i, s in enumerate(sessions) if CTS[n] in s]
        sessions = [
            x for x in sessions[ct_tp[0] + 1:]
            if glob.glob(x + '/{}.nii.gz'.format(contrast))
        ]
        if len(sessions) > 1:
            ref_tp = sessions[0].split('/')[-1]
            sessions.remove(sessions[0])
            sessions = [x.split('/')[-1] for x in sessions]
            datasource = nipype.Node(interface=nipype.DataGrabber(
                infields=['contrasts', 'sub_id', 'sessions', 'ref_tp'],
                outfields=['reference', 'to_reg']),
                                     name='datasource')
            datasource.inputs.base_directory = BASE_DIR
            datasource.inputs.template = '*'
            datasource.inputs.sort_filelist = True
            datasource.inputs.field_template = dict(
                reference='%s/%s/%s.nii.gz', to_reg='%s/%s/%s.nii.gz')
            datasource.inputs.template_args = dict(
                to_reg=[['sub_id', 'sessions', 'contrasts']],
                reference=[['sub_id', 'ref_tp', 'contrasts']])
            datasource.inputs.raise_on_empty = False
            datasource.inputs.contrasts = contrast
            datasource.inputs.sub_id = sub.split('/')[-1]
            datasource.inputs.sessions = sessions
            datasource.inputs.ref_tp = ref_tp
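            # How the DataGrabber resolves files (standard nipype behaviour):
            # each '%s' in a field_template is filled with the inputs listed
            # in template_args, so 'reference' expands to
            # BASE_DIR/<sub_id>/<ref_tp>/<contrast>.nii.gz and 'to_reg'
            # yields one file per entry in 'sessions'.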
Example #22
def longitudinal_registration(sub_id,
                              datasource,
                              sessions,
                              reference,
                              result_dir,
                              nipype_cache,
                              bet_workflow=None):
    """
    This is a workflow to register multi-modalities MR (T2, T1KM, FLAIR) to their 
    reference T1 image, in multiple time-points cohort. In particular, for each 
    subject, this workflow will register the MR images in each time-point (tp)
    to the corresponding T1, then it will register all the T1 images to a reference T1
    (the one that is the closest in time to the radiotherapy session), and finally the
    reference T1 to the BPLCT. At the end, all the MR images will be saved both in T1 space
    (for each tp) and in CT space.
    """
    reg2T1 = nipype.MapNode(interface=AntsRegSyn(),
                            iterfield=['input_file'],
                            name='reg2T1')
    reg2T1.inputs.transformation = 's'
    reg2T1.inputs.num_dimensions = 3
    reg2T1.inputs.num_threads = 6

    if reference:
        regT12CT = nipype.MapNode(interface=AntsRegSyn(),
                                  iterfield=['input_file'],
                                  name='regT12CT')
        regT12CT.inputs.transformation = 'r'
        regT12CT.inputs.num_dimensions = 3
        regT12CT.inputs.num_threads = 4

    reg_nodes = []
    for i in range(3):
        reg = nipype.MapNode(interface=AntsRegSyn(),
                             iterfield=['input_file', 'ref_file'],
                             name='ants_reg{}'.format(i))
        reg.inputs.transformation = 'r'
        reg.inputs.num_dimensions = 3
        reg.inputs.num_threads = 4
        reg.inputs.interpolation = 'BSpline'
        reg_nodes.append(reg)

    apply_mask_nodes = []
    for i in range(3):
        masking = nipype.MapNode(interface=ApplyMask(),
                                 iterfield=['in_file', 'mask_file'],
                                 name='masking{}'.format(i))
        apply_mask_nodes.append(masking)

    apply_ts_nodes = []
    for i in range(3):
        apply_ts = nipype.MapNode(interface=ApplyTransforms(),
                                  iterfield=['input_image', 'transforms'],
                                  name='apply_ts{}'.format(i))
        apply_ts_nodes.append(apply_ts)
    # Apply ts nodes for T1_ref normalization
    apply_ts_nodes1 = []
    for i in range(3):
        apply_ts = nipype.MapNode(interface=ApplyTransforms(),
                                  iterfield=['input_image', 'transforms'],
                                  name='apply_ts1{}'.format(i))
        apply_ts_nodes1.append(apply_ts)

    split_ds_nodes = []
    for i in range(4):
        split_ds = nipype.Node(interface=Split(), name='split_ds{}'.format(i))
        split_ds.inputs.splits = [1] * len(sessions)
        split_ds_nodes.append(split_ds)

    apply_ts_t1 = nipype.MapNode(interface=ApplyTransforms(),
                                 iterfield=['input_image', 'transforms'],
                                 name='apply_ts_t1')
    merge_nodes = []
    if reference:
        iterfields = ['in1', 'in2', 'in3', 'in4']
        iterfields_t1 = ['in1', 'in2', 'in3']
        if_0 = 2
    else:
        iterfields = ['in1', 'in2', 'in3']
        iterfields_t1 = ['in1', 'in2']
        if_0 = 1

    for i in range(3):
        merge = nipype.MapNode(interface=Merge(len(iterfields)),
                               iterfield=iterfields,
                               name='merge{}'.format(i))
        merge.inputs.ravel_inputs = True
        merge_nodes.append(merge)
    # Merging transforms for normalization to T1_ref
    merge_nodes1 = []
    for i in range(3):
        merge = nipype.MapNode(interface=Merge(3),
                               iterfield=['in1', 'in2', 'in3'],
                               name='merge1{}'.format(i))
        merge.inputs.ravel_inputs = True
        merge_nodes1.append(merge)

    merge_ts_t1 = nipype.MapNode(interface=Merge(len(iterfields_t1)),
                                 iterfield=iterfields_t1,
                                 name='merge_t1')
    merge_ts_t1.inputs.ravel_inputs = True

    # We have to create a "fake" merge of the T1_0-to-CT transformation in
    # order to have the same number of matrices as inputs in the MapNodes.
    fake_merge = nipype.Node(interface=Merge(len(sessions)), name='fake_merge')
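    # Sketch of what the fake merge yields (assumed Merge behaviour): with
    # three sessions and the single T1_ref-to-CT matrix wired to every input,
    # fake_merge produces [t12ct.mat, t12ct.mat, t12ct.mat], i.e. one copy
    # per session, matching the length of the other MapNode transform lists.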

    datasink = nipype.Node(nipype.DataSink(base_directory=result_dir),
                           "datasink")

    substitutions = [('subid', sub_id)]
    for i, session in enumerate(sessions):
        substitutions += [('session', session)]
        substitutions += [('_masking0{}/antsregWarped_masked.nii.gz'.format(i),
                           session + '/' + 'CT1_preproc.nii.gz')]
        substitutions += [('_reg2T1{}/antsreg0GenericAffine.mat'.format(i),
                           session + '/' + 'reg2T1_ref.mat')]
        substitutions += [('_reg2T1{}/antsreg1Warp.nii.gz'.format(i),
                           session + '/' + 'reg2T1_ref_warp.nii.gz')]
        substitutions += [('_reg2T1{}/antsregWarped.nii.gz'.format(i),
                           session + '/' + 'T1_reg2T1_ref.nii.gz')]
        substitutions += [('_regT12CT{}/antsreg0GenericAffine.mat'.format(i),
                           '/regT1_ref2CT.mat')]
        substitutions += [('_masking1{}/antsregWarped_masked.nii.gz'.format(i),
                           session + '/' + 'T2_preproc.nii.gz')]
        substitutions += [('_masking2{}/antsregWarped_masked.nii.gz'.format(i),
                           session + '/' + 'FLAIR_preproc.nii.gz')]
        substitutions += [('_apply_ts0{}/CT1_trans.nii.gz'.format(i),
                           session + '/' + 'CT1_reg2CT.nii.gz')]
        substitutions += [('_apply_ts1{}/T2_trans.nii.gz'.format(i),
                           session + '/' + 'T2_reg2CT.nii.gz')]
        substitutions += [('_apply_ts2{}/FLAIR_trans.nii.gz'.format(i),
                           session + '/' + 'FLAIR_reg2CT.nii.gz')]
        substitutions += [('_apply_ts_t1{}/T1_trans.nii.gz'.format(i),
                           session + '/' + 'T1_reg2CT.nii.gz')]
        substitutions += [('_apply_ts10{}/CT1_trans.nii.gz'.format(i),
                           session + '/' + 'CT1_reg2T1_ref.nii.gz')]
        substitutions += [('_apply_ts11{}/T2_trans.nii.gz'.format(i),
                           session + '/' + 'T2_reg2T1_ref.nii.gz')]
        substitutions += [('_apply_ts12{}/FLAIR_trans.nii.gz'.format(i),
                           session + '/' + 'FLAIR_reg2T1_ref.nii.gz')]

    datasink.inputs.substitutions = substitutions
    # Create Workflow
    workflow = nipype.Workflow('registration_workflow', base_dir=nipype_cache)

    for i, reg in enumerate(reg_nodes):
        workflow.connect(datasource, SEQUENCES[i + 1], reg, 'input_file')
        workflow.connect(datasource, SEQUENCES[0], reg, 'ref_file')
    # bring every MR in CT space
    for i, node in enumerate(apply_ts_nodes):
        workflow.connect(datasource, SEQUENCES[i + 1], node, 'input_image')
        if reference:
            workflow.connect(datasource, 'reference', node, 'reference_image')
        else:
            workflow.connect(datasource, 't1_0', node, 'reference_image')
        workflow.connect(merge_nodes[i], 'out', node, 'transforms')
        workflow.connect(node, 'output_image', datasink,
                         'results.subid.@{}_reg2CT'.format(SEQUENCES[i + 1]))
    # bring every MR in T1_ref space
    for i, node in enumerate(apply_ts_nodes1):
        workflow.connect(datasource, SEQUENCES[i + 1], node, 'input_image')
        workflow.connect(datasource, 't1_0', node, 'reference_image')
        workflow.connect(merge_nodes1[i], 'out', node, 'transforms')
        workflow.connect(
            node, 'output_image', datasink,
            'results.subid.@{}_reg2T1_ref'.format(SEQUENCES[i + 1]))

    for i, node in enumerate(merge_nodes):
        workflow.connect(reg_nodes[i], 'regmat', node, 'in{}'.format(if_0 + 2))
        workflow.connect(reg2T1, 'regmat', node, 'in{}'.format(if_0 + 1))
        workflow.connect(reg2T1, 'warp_file', node, 'in{}'.format(if_0))
        if reference:
            workflow.connect(fake_merge, 'out', node, 'in1')
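    # The stacked transforms follow the antsApplyTransforms convention of
    # last-listed-first-applied: the modality-to-T1 rigid matrix acts first,
    # then the session-T1-to-T1_ref affine and warp, and finally (when a
    # reference CT exists) the T1_ref-to-CT matrix, mapping each modality
    # all the way into CT space.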

    for i, node in enumerate(merge_nodes1):
        workflow.connect(reg_nodes[i], 'regmat', node, 'in3')
        workflow.connect(reg2T1, 'regmat', node, 'in2')
        workflow.connect(reg2T1, 'warp_file', node, 'in1')

    for i, mask in enumerate(apply_mask_nodes):
        workflow.connect(reg_nodes[i], 'reg_file', mask, 'in_file')
        if bet_workflow is not None:
            workflow.connect(bet_workflow, 'bet.out_mask', mask, 'mask_file')
        else:
            workflow.connect(datasource, 't1_mask', mask, 'mask_file')
        workflow.connect(mask, 'out_file', datasink,
                         'results.subid.@{}_preproc'.format(SEQUENCES[i + 1]))
    if bet_workflow is not None:
        workflow.connect(bet_workflow, 'bet.out_file', reg2T1, 'input_file')
        workflow.connect(bet_workflow, 't1_0_bet.out_file', reg2T1, 'ref_file')
    else:
        workflow.connect(datasource, 't1_bet', reg2T1, 'input_file')
        workflow.connect(datasource, 't1_0_bet', reg2T1, 'ref_file')

    if reference:
        for i, sess in enumerate(sessions):
            workflow.connect(regT12CT, 'regmat', fake_merge,
                             'in{}'.format(i + 1))
            workflow.connect(regT12CT, 'regmat', datasink,
                             'results.subid.{0}.@regT12CT_mat'.format(sess))
        workflow.connect(datasource, 'reference', regT12CT, 'ref_file')
        workflow.connect(datasource, 't1_0', regT12CT, 'input_file')
        workflow.connect(fake_merge, 'out', merge_ts_t1, 'in1')
        workflow.connect(datasource, 'reference', apply_ts_t1,
                         'reference_image')
    else:
        workflow.connect(datasource, 't1_0', apply_ts_t1, 'reference_image')

    workflow.connect(datasource, 't1', apply_ts_t1, 'input_image')

    workflow.connect(merge_ts_t1, 'out', apply_ts_t1, 'transforms')
    workflow.connect(reg2T1, 'regmat', merge_ts_t1, 'in{}'.format(if_0 + 1))
    workflow.connect(reg2T1, 'warp_file', merge_ts_t1, 'in{}'.format(if_0))

    workflow.connect(reg2T1, 'warp_file', datasink,
                     'results.subid.@reg2CT_warp')
    workflow.connect(reg2T1, 'regmat', datasink, 'results.subid.@reg2CT_mat')
    workflow.connect(reg2T1, 'reg_file', datasink, 'results.subid.@T12T1_ref')
    workflow.connect(apply_ts_t1, 'output_image', datasink,
                     'results.subid.@T1_reg2CT')

    if bet_workflow is not None:
        workflow = datasink_base(datasink, datasource, workflow, sessions,
                                 reference)
    else:
        workflow = datasink_base(datasink,
                                 datasource,
                                 workflow,
                                 sessions,
                                 reference,
                                 extra_nodes=['t1_bet'])

    return workflow
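# Hypothetical chaining sketch (the brain-extraction workflow and the
# datasource are assumed to be built elsewhere in the package, as in the
# single-time-point example above):
#
#     reg_wf = longitudinal_registration(
#         sub_id, datasource, sessions, reference=True,
#         result_dir='/data/results', nipype_cache='/data/cache',
#         bet_workflow=bet_wf)
#     reg_wf.run(plugin='MultiProc', plugin_args={'n_procs': 4})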
Example #23
    def sorting_workflow(self,
                         subject_name_position=-3,
                         renaming=False,
                         mr_classiffication=True,
                         checkpoints=None,
                         sub_checkpoints=None):

        nipype_cache = os.path.join(self.nipype_cache, 'data_sorting')
        result_dir = self.result_dir

        workflow = nipype.Workflow('sorting_workflow', base_dir=nipype_cache)
        datasink = nipype.Node(nipype.DataSink(base_directory=result_dir),
                               "datasink")

        #         prep = nipype.Node(interface=FolderPreparation(), name='prep')
        #         prep.inputs.input_dir = self.base_dir
        #         create_list = nipype.Node(interface=CreateSubjectsList(), name='cl')
        #         create_list.inputs.input_dir = self.base_dir
        file_check = nipype.Node(interface=FileCheck(), name='fc')
        file_check.inputs.input_dir = self.base_dir
        file_check.inputs.subject_name_position = subject_name_position
        file_check.inputs.renaming = renaming
        prep = nipype.MapNode(interface=FolderPreparation(),
                              name='prep',
                              iterfield=['input_list'])
        sort = nipype.MapNode(interface=FolderSorting(),
                              name='sort',
                              iterfield=['input_dir'])
        mr_rt_merge = nipype.MapNode(interface=Merge(2),
                                     name='mr_rt_merge',
                                     iterfield=['in1', 'in2'])
        mr_rt_merge.inputs.ravel_inputs = True
        merging = nipype.Node(interface=FolderMerge(), name='merge')
        if mr_classiffication:
            if checkpoints is None or sub_checkpoints is None:
                raise Exception('MRClass weights were not provided, MR image '
                                'classification cannot be performed!')
            mrclass = nipype.MapNode(interface=MRClass(),
                                     name='mrclass',
                                     iterfield=['mr_images'])
            mrclass.inputs.checkpoints = checkpoints
            mrclass.inputs.sub_checkpoints = sub_checkpoints
        else:
            mr_rt_merge.inputs.in1 = None
        rt_sorting = nipype.MapNode(interface=RTDataSorting(),
                                    name='rt_sorting',
                                    iterfield=['input_dir'])

        #         workflow.connect(create_list, 'file_list', file_check, 'input_file')
        workflow.connect(file_check, 'out_list', prep, 'input_list')
        workflow.connect(prep, 'out_folder', sort, 'input_dir')
        workflow.connect(sort, 'out_folder', rt_sorting, 'input_dir')
        if mr_classiffication:
            workflow.connect(sort, 'mr_images', mrclass, 'mr_images')
            workflow.connect(mrclass, 'out_folder', mr_rt_merge, 'in1')

            workflow.connect(rt_sorting, 'out_folder', mr_rt_merge, 'in2')
            workflow.connect(mr_rt_merge, 'out', merging, 'input_list')
            workflow.connect(merging, 'out_folder', datasink, '@rt_sorted')
        else:
            workflow.connect(rt_sorting, 'out_folder', datasink, '@rt_sorted')
            substitutions = [(r'_rt_sorting\d+/', '')]
            datasink.inputs.regexp_substitutions = substitutions

        return workflow
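    # Hypothetical usage sketch (the owning class is not shown here and the
    # weight paths are placeholders; checkpoints must be provided whenever
    # mr_classiffication is True):
    #
    #     wf = self.sorting_workflow(
    #         subject_name_position=-3,
    #         mr_classiffication=True,
    #         checkpoints='/path/to/mrclass_checkpoints',
    #         sub_checkpoints='/path/to/mrclass_sub_checkpoints')
    #     wf.run()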
Example #24

SUB_LIST = [
    x for x in sorted(glob.glob(os.path.join(BASE_DIR, '*')))
    if os.path.isdir(os.path.join(BASE_DIR, x))
]

for n, sub in enumerate(SUB_LIST):
    for contrast in CONTRASTS:
        sub_name = sub.split('/')[-1]
        sessions = [
            x.split('/')[-1] for x in sorted(glob.glob(os.path.join(sub, '*')))
            if 'reference_tp' not in x
        ]
        ref_tp = [
            x.split('/')[-1] for x in sorted(glob.glob(os.path.join(sub, '*')))
            if 'reference_tp' in x
        ][0]
        datasource = nipype.Node(interface=nipype.DataGrabber(
            infields=['contrasts', 'sub_id', 'sessions', 'ref_tp'],
            outfields=['reference', 'to_reg']),
                                 name='datasource')
        datasource.inputs.base_directory = BASE_DIR
        datasource.inputs.template = '*'
        datasource.inputs.sort_filelist = True
        datasource.inputs.field_template = dict(
            reference='%s/%s/%s_bet.nii.gz', to_reg='%s/%s/%s_bet.nii.gz')
        datasource.inputs.template_args = dict(
            to_reg=[['sub_id', 'sessions', 'contrasts']],
            reference=[['sub_id', 'ref_tp', 'contrasts']])
        datasource.inputs.raise_on_empty = False
        datasource.inputs.contrasts = contrast
        datasource.inputs.sub_id = sub.split('/')[-1]
        datasource.inputs.sessions = sessions
        datasource.inputs.ref_tp = ref_tp
Example #25
def tumor_segmentation(datasource,
                       sub_id,
                       sessions,
                       gtv_model,
                       tumor_model,
                       result_dir,
                       nipype_cache,
                       reference,
                       reg_workflow=None,
                       bet_workflow=None):

    if reg_workflow is None:
        if reference:
            iterfields_t1 = ['in1', 'in2', 'in3']
            if_0 = 2
        else:
            iterfields_t1 = ['in1', 'in2']
            if_0 = 1
        merge_ts_t1 = nipype.MapNode(interface=Merge(len(iterfields_t1)),
                                     iterfield=iterfields_t1,
                                     name='merge_t1')
        merge_ts_t1.inputs.ravel_inputs = True

    apply_ts_gtv = nipype.MapNode(interface=ApplyTransforms(),
                                  iterfield=['input_image', 'transforms'],
                                  name='apply_ts_gtv')
    apply_ts_gtv.inputs.interpolation = 'NearestNeighbor'
    apply_ts_tumor = nipype.MapNode(interface=ApplyTransforms(),
                                    iterfield=['input_image', 'transforms'],
                                    name='apply_ts_tumor')
    apply_ts_tumor.inputs.interpolation = 'NearestNeighbor'
    apply_ts_tumor1 = nipype.MapNode(interface=ApplyTransforms(),
                                     iterfield=['input_image', 'transforms'],
                                     name='apply_ts_tumor1')
    apply_ts_tumor1.inputs.interpolation = 'NearestNeighbor'

    if reference:
        merge_ts_t1ref = nipype.MapNode(interface=Merge(len(iterfields_t1)),
                                        iterfield=['in1', 'in2'],
                                        name='merge_t1ref')
        merge_ts_t1ref.inputs.ravel_inputs = True
        apply_ts_gtv_t1ref = nipype.MapNode(
            interface=ApplyTransforms(),
            iterfield=['input_image', 'transforms'],
            name='apply_ts_gtv_t1ref')
        apply_ts_gtv_t1ref.inputs.interpolation = 'NearestNeighbor'
        apply_ts_tumor_t1ref = nipype.MapNode(
            interface=ApplyTransforms(),
            iterfield=['input_image', 'transforms'],
            name='apply_ts_tumor_t1ref')
        apply_ts_tumor_t1ref.inputs.interpolation = 'NearestNeighbor'
        apply_ts_tumor1_t1ref = nipype.MapNode(
            interface=ApplyTransforms(),
            iterfield=['input_image', 'transforms'],
            name='apply_ts_tumor1_t1ref')
        apply_ts_tumor1_t1ref.inputs.interpolation = 'NearestNeighbor'
        outname = 'reg2CT'
    else:
        outname = 'reg2T1ref'

    tumor_seg = nipype.MapNode(interface=HDGlioPredict(),
                               iterfield=['t1', 'ct1', 't2', 'flair'],
                               name='tumor_segmentation')
    tumor_seg.inputs.out_file = 'segmentation'

    mi = nipype.MapNode(Merge(2), iterfield=['in1', 'in2'], name='merge')

    gtv_seg_data_prep = nipype.MapNode(interface=NNUnetPreparation(),
                                       iterfield=['images'],
                                       name='gtv_seg_data_prep')

    gtv_seg = nipype.MapNode(interface=NNUnetInference(),
                             iterfield=['input_folder'],
                             name='gtv_segmentation')
    gtv_seg.inputs.model_folder = gtv_model

    tumor_seg_2mods = nipype.MapNode(interface=NNUnetInference(),
                                     iterfield=['input_folder'],
                                     name='tumor_seg_2mods')
    tumor_seg_2mods.inputs.model_folder = tumor_model

    datasink = nipype.Node(nipype.DataSink(base_directory=result_dir),
                           "datasink")

    substitutions = [('/segmentation.nii.gz', '/Tumor_predicted.nii.gz')]
    substitutions += [('subid', sub_id)]
    for i, session in enumerate(sessions):
        substitutions += [('_tumor_segmentation{}/'.format(i), session + '/')]
        substitutions += [('_gtv_segmentation{}/subject1'.format(i),
                           session + '/GTV_predicted')]
        substitutions += [('_tumor_seg_2mods{}/subject1'.format(i),
                           session + '/Tumor_predicted_2modalities')]
        substitutions += [
            ('_apply_ts_gtv{}/subject1_trans.nii.gz'.format(i),
             session + '/' + 'GTV_predicted_{}.nii.gz'.format(outname))
        ]
        substitutions += [
            ('_apply_ts_tumor1{}/subject1_trans.nii.gz'.format(i), session +
             '/' + 'Tumor_predicted_2modalities_{}.nii.gz'.format(outname))
        ]
        substitutions += [
            ('_apply_ts_tumor{}/segmentation_trans.nii.gz'.format(i),
             session + '/' + 'Tumor_predicted_{}.nii.gz'.format(outname))
        ]

        substitutions += [
            ('_apply_ts_gtv_t1ref{}/subject1_trans.nii.gz'.format(i),
             session + '/' + 'GTV_predicted_reg2T1ref.nii.gz')
        ]
        substitutions += [
            ('_apply_ts_tumor1_t1ref{}/subject1_trans.nii.gz'.format(i),
             session + '/' + 'Tumor_predicted_2modalities_reg2T1ref.nii.gz')
        ]
        substitutions += [
            ('_apply_ts_tumor_t1ref{}/segmentation_trans.nii.gz'.format(i),
             session + '/' + 'Tumor_predicted_reg2T1ref.nii.gz')
        ]
    datasink.inputs.substitutions = substitutions

    # Create Workflow
    workflow = nipype.Workflow('tumor_segmentation_workflow',
                               base_dir=nipype_cache)

    # Connect from registration workflow, if provided
    if reg_workflow is not None:
        workflow.connect(reg_workflow, 'masking0.out_file', mi, 'in1')
        workflow.connect(reg_workflow, 'masking2.out_file', mi, 'in2')
        workflow.connect(reg_workflow, 'masking0.out_file', tumor_seg, 'ct1')
        workflow.connect(reg_workflow, 'masking1.out_file', tumor_seg, 't2')
        workflow.connect(reg_workflow, 'masking2.out_file', tumor_seg, 'flair')
        workflow.connect(bet_workflow, 'bet.out_file', tumor_seg, 't1')
        workflow.connect(reg_workflow, 'merge_t1.out', apply_ts_tumor,
                         'transforms')
        workflow.connect(reg_workflow, 'merge_t1.out', apply_ts_gtv,
                         'transforms')
        workflow.connect(reg_workflow, 'merge_t1.out', apply_ts_tumor1,
                         'transforms')
        if reference:
            workflow.connect(reg_workflow, 'reg2T1.regmat', merge_ts_t1ref,
                             'in2')
            workflow.connect(reg_workflow, 'reg2T1.warp_file', merge_ts_t1ref,
                             'in1')
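        # Note on the 'node.port' strings above (standard nipype behaviour):
        # when a Workflow is passed to connect() in place of a node, a source
        # such as 'masking0.out_file' addresses the 'out_file' output of the
        # node named 'masking0' inside that sub-workflow.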
    else:
        #         for i in range(len(sessions)):
        #             workflow.connect(datasource, 't12ct_mat', fake_merge,
        #                              'in{}'.format(i+1))
        workflow.connect(datasource, 'reg2t1_mat', merge_ts_t1,
                         'in{}'.format(if_0 + 1))
        workflow.connect(datasource, 'reg2t1_warp', merge_ts_t1,
                         'in{}'.format(if_0))
        if reference:
            workflow.connect(datasource, 't12ct_mat', merge_ts_t1, 'in1')
            workflow.connect(datasource, 'reg2t1_mat', merge_ts_t1ref, 'in1')
            workflow.connect(datasource, 'reg2t1_warp', merge_ts_t1ref, 'in2')
        workflow.connect(merge_ts_t1, 'out', apply_ts_tumor, 'transforms')
        workflow.connect(merge_ts_t1, 'out', apply_ts_gtv, 'transforms')
        workflow.connect(merge_ts_t1, 'out', apply_ts_tumor1, 'transforms')
        workflow.connect(datasource, 'ct1_preproc', mi, 'in1')
        workflow.connect(datasource, 'flair_preproc', mi, 'in2')
        workflow.connect(datasource, 'ct1_preproc', tumor_seg, 'ct1')
        workflow.connect(datasource, 't2_preproc', tumor_seg, 't2')
        workflow.connect(datasource, 'flair_preproc', tumor_seg, 'flair')
        workflow.connect(datasource, 't1_preproc', tumor_seg, 't1')

    # Connect from datasource
    if reference:
        workflow.connect(merge_ts_t1ref, 'out', apply_ts_tumor_t1ref,
                         'transforms')
        workflow.connect(merge_ts_t1ref, 'out', apply_ts_gtv_t1ref,
                         'transforms')
        workflow.connect(merge_ts_t1ref, 'out', apply_ts_tumor1_t1ref,
                         'transforms')
        workflow.connect(datasource, 'reference', apply_ts_gtv,
                         'reference_image')
        workflow.connect(datasource, 'reference', apply_ts_tumor1,
                         'reference_image')
        workflow.connect(datasource, 'reference', apply_ts_tumor,
                         'reference_image')
        workflow.connect(datasource, 't1_0', apply_ts_gtv_t1ref,
                         'reference_image')
        workflow.connect(datasource, 't1_0', apply_ts_tumor1_t1ref,
                         'reference_image')
        workflow.connect(datasource, 't1_0', apply_ts_tumor_t1ref,
                         'reference_image')
    else:
        workflow.connect(datasource, 't1_0', apply_ts_gtv, 'reference_image')
        workflow.connect(datasource, 't1_0', apply_ts_tumor1,
                         'reference_image')
        workflow.connect(datasource, 't1_0', apply_ts_tumor, 'reference_image')

    # Connect other nodes

    # Nodes to prepare the data before nnUNet inference
    workflow.connect(mi, 'out', gtv_seg_data_prep, 'images')

    # Nodes to segment GTV and tumor using nnUNet
    workflow.connect(gtv_seg_data_prep, 'output_folder', gtv_seg,
                     'input_folder')
    workflow.connect(gtv_seg_data_prep, 'output_folder', tumor_seg_2mods,
                     'input_folder')

    # Nodes to normalize segmentations to CT space
    workflow.connect(gtv_seg, 'output_file', apply_ts_gtv, 'input_image')
    workflow.connect(tumor_seg_2mods, 'output_file', apply_ts_tumor1,
                     'input_image')
    workflow.connect(tumor_seg, 'out_file', apply_ts_tumor, 'input_image')

    # Connect datasink nodes to save outputs
    workflow.connect(tumor_seg, 'out_file', datasink,
                     'results.subid.@tumor_seg')
    workflow.connect(gtv_seg, 'output_file', datasink,
                     'results.subid.@gtv_seg')
    workflow.connect(tumor_seg_2mods, 'output_file', datasink,
                     'results.subid.@tumor_seg_2mods')
    workflow.connect(apply_ts_gtv, 'output_image', datasink,
                     'results.subid.@gtv_reg2CT')
    workflow.connect(apply_ts_tumor, 'output_image', datasink,
                     'results.subid.@tumor_reg2CT')
    workflow.connect(apply_ts_tumor1, 'output_image', datasink,
                     'results.subid.@tumor1_reg2CT')
    if reference:
        workflow.connect(tumor_seg_2mods, 'output_file', apply_ts_tumor1_t1ref,
                         'input_image')
        workflow.connect(tumor_seg, 'out_file', apply_ts_tumor_t1ref,
                         'input_image')
        workflow.connect(gtv_seg, 'output_file', apply_ts_gtv_t1ref,
                         'input_image')
        workflow.connect(apply_ts_gtv_t1ref, 'output_image', datasink,
                         'results.subid.@gtv_reg2T1ref')
        workflow.connect(apply_ts_tumor_t1ref, 'output_image', datasink,
                         'results.subid.@tumor_reg2T1ref')
        workflow.connect(apply_ts_tumor1_t1ref, 'output_image', datasink,
                         'results.subid.@tumor1_reg2T1ref')

    workflow = datasink_base(datasink, datasource, workflow, sessions,
                             reference)

    return workflow
Example #26
from basecore.interfaces.utils import DicomCheck, ConversionCheck
import nipype
import nipype.interfaces.utility as util
from nipype.interfaces.dcm2nii import Dcm2niix


contrasts = ['T1KM', 'FLAIR', 'CT', 'ADC', 'T1', 'SWI', 'T2', 'T2KM']
rt_files = ['RTSTRUCT', 'RTDOSE', 'RTPLAN']
base_dir = '/media/fsforazz/portable_hdd/data_sorted/test/'
result_dir = '/mnt/sdb/Cinderella_FU_sorted_all_test2/'
cache_dir = '/mnt/sdb/sorted_data/sorting_cache2/'

inputnode = nipype.Node(
    interface=util.IdentityInterface(fields=['contrasts']),
    name='inputnode')
inputnode.iterables = ('contrasts', contrasts)

datasource = nipype.Node(
    interface=nipype.DataGrabber(infields=['contrasts'], outfields=['directory']),
    name='datasource')  
datasource.inputs.base_directory = base_dir
datasource.inputs.template = '*'
datasource.inputs.sort_filelist = True
datasource.inputs.field_template = dict(directory='*/*/%s/1-*')

inputnode_rt = nipype.Node(
    interface=util.IdentityInterface(fields=['rt_files']),
    name='inputnode_rt')
inputnode_rt.iterables = ('rt_files', rt_files)

datasource_rt = nipype.Node(
Example #27
    def convertion_workflow(self):

        self.datasource()

        datasource = self.data_source
        ref_sequence = self.ref_sequence
        t10 = self.t10
        sub_id = self.sub_id
        result_dir = self.result_dir
        nipype_cache = self.nipype_cache
        sequences = self.sequences
        reference = self.reference
        rt_data = self.rt
        if rt_data is not None:
            rt_session = rt_data['session']

        workflow = nipype.Workflow('data_convertion_workflow',
                                   base_dir=nipype_cache)

        datasink = nipype.Node(nipype.DataSink(base_directory=result_dir),
                               "datasink")
        substitutions = [('subid', sub_id)]
        substitutions += [('results/', '{}/'.format(self.workflow_name))]
        if isinstance(ref_sequence, list):
            to_convert = sequences + ref_sequence
        else:
            to_convert = sequences + [ref_sequence]
        if rt_data is not None:
            rt_sequences = [
                x for x in rt_data.keys()
                if rt_data[x] and x != 'session' and x != 'labels'
            ]
            workflow.connect(datasource, 'rt', datasink, 'results.subid.@rt')
            to_convert = to_convert + rt_sequences
        else:
            rt_sequences = []

        if reference:
            to_convert.append('reference')
        if t10:
            to_convert.append('t1_0')
        if self.ct_sessions:
            to_convert.append('ct')

        for seq in to_convert:
            if seq not in rt_sequences:
                dc = nipype.MapNode(interface=DicomCheck(),
                                    iterfield=['dicom_dir'],
                                    name='dc{}'.format(seq))
                workflow.connect(datasource, seq, dc, 'dicom_dir')
                converter = nipype.MapNode(
                    interface=Dcm2niix(),
                    iterfield=['source_dir', 'out_filename'],
                    name='converter{}'.format(seq))
                converter.inputs.compress = 'y'
                converter.inputs.philips_float = False
                if seq in ('reference', 'ct'):
                    converter.inputs.merge_imgs = True
                else:
                    converter.inputs.merge_imgs = False
                check = nipype.MapNode(interface=ConversionCheck(),
                                       iterfield=['in_file', 'file_name'],
                                       name='check_conversion{}'.format(seq))

                workflow.connect(dc, 'outdir', converter, 'source_dir')
                workflow.connect(dc, 'scan_name', converter, 'out_filename')
                workflow.connect(dc, 'scan_name', check, 'file_name')
                workflow.connect(converter, 'converted_files', check,
                                 'in_file')
                if seq == 'reference':
                    workflow.connect(
                        check, 'out_file', datasink,
                        'results.subid.REF.@{}_converted'.format(seq))
                elif seq == 't1_0':
                    workflow.connect(
                        check, 'out_file', datasink,
                        'results.subid.T10.@{}_converted'.format(seq))
                else:
                    workflow.connect(check, 'out_file', datasink,
                                     'results.subid.@{}_converted'.format(seq))
                    for i, session in enumerate(self.session_names[seq]):
                        substitutions += [(('_converter{0}{1}/'.format(seq, i),
                                            session + '/'))]
            else:
                if seq != 'rtstruct':
                    if seq == 'rtct':
                        converter = nipype.MapNode(
                            interface=Dcm2niix(),
                            iterfield=['source_dir', 'out_filename'],
                            name='converter{}'.format(seq))
                        converter.inputs.compress = 'y'
                        converter.inputs.philips_float = False
                        converter.inputs.merge_imgs = True
                    else:
                        converter = nipype.MapNode(
                            interface=DoseConverter(),
                            iterfield=['input_dose', 'out_name'],
                            name='converter{}'.format(seq))
                    if seq == 'doses':
                        converter = nipype.MapNode(
                            interface=DoseConverter(),
                            iterfield=['input_dose'],
                            name='converter{}'.format(seq))
                        get_dose = nipype.MapNode(interface=GetRefRTDose(),
                                                  iterfield=['doses'],
                                                  name='get_doses')
                        workflow.connect(datasource, 'doses', get_dose,
                                         'doses')
                        workflow.connect(get_dose, 'dose_file', converter,
                                         'input_dose')
                        converter.inputs.out_name = 'Unused_RTDOSE.nii.gz'
                        workflow.connect(
                            converter, 'out_file', datasink,
                            'results.subid.@{}_converted'.format(seq))
                    else:
                        dc = nipype.MapNode(interface=DicomCheck(),
                                            iterfield=['dicom_dir'],
                                            name='dc{}'.format(seq))
                        workflow.connect(datasource, seq, dc, 'dicom_dir')
                        if seq == 'rtct':
                            check = nipype.MapNode(
                                interface=ConversionCheck(),
                                iterfield=['in_file', 'file_name'],
                                name='check_conversion{}'.format(seq))

                            workflow.connect(dc, 'outdir', converter,
                                             'source_dir')
                            workflow.connect(dc, 'scan_name', converter,
                                             'out_filename')
                            workflow.connect(dc, 'scan_name', check,
                                             'file_name')
                            workflow.connect(converter, 'converted_files',
                                             check, 'in_file')
                            workflow.connect(
                                check, 'out_file', datasink,
                                'results.subid.@{}_converted'.format(seq))
                        else:
                            workflow.connect(dc, 'dose_file', converter,
                                             'input_dose')
                            workflow.connect(dc, 'scan_name', converter,
                                             'out_name')
                            workflow.connect(
                                converter, 'out_file', datasink,
                                'results.subid.@{}_converted'.format(seq))
                else:
                    dc = nipype.MapNode(interface=DicomCheck(),
                                        iterfield=['dicom_dir'],
                                        name='dc{}'.format(seq))
                    workflow.connect(datasource, seq, dc, 'dicom_dir')
                    workflow.connect(dc, 'outdir', datasink,
                                     'results.subid.@rtstruct')
                    for i, session in enumerate(rt_session):
                        substitutions += [
                            (('_dc{0}{1}/checked_dicoms'.format(seq, i),
                              session + '/RTSTRUCT_used'))
                        ]
                for i, session in enumerate(rt_session):
                    substitutions += [(('_converter{0}{1}/'.format(seq, i),
                                        session + '/'))]

        substitutions += [('_converterreference0/', '')]
        substitutions += [('_convertert1_00/', '')]

        datasink.inputs.substitutions = substitutions

        return workflow
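# Minimal standalone sketch of the DICOM-to-NIfTI chain used above. All paths
# are placeholders; DicomCheck and ConversionCheck come from
# basecore.interfaces.utils, as imported in Example #26.
import nipype
from nipype.interfaces.dcm2nii import Dcm2niix
from basecore.interfaces.utils import DicomCheck, ConversionCheck

dc = nipype.MapNode(interface=DicomCheck(), iterfield=['dicom_dir'],
                    name='dc_t1')
dc.inputs.dicom_dir = ['/data/dicom/sub-001/session-01/T1']
converter = nipype.MapNode(interface=Dcm2niix(),
                           iterfield=['source_dir', 'out_filename'],
                           name='converter_t1')
converter.inputs.compress = 'y'
converter.inputs.merge_imgs = False
check = nipype.MapNode(interface=ConversionCheck(),
                       iterfield=['in_file', 'file_name'],
                       name='check_t1')

wf = nipype.Workflow('convert_t1', base_dir='/data/cache')
wf.connect(dc, 'outdir', converter, 'source_dir')
wf.connect(dc, 'scan_name', converter, 'out_filename')
wf.connect(dc, 'scan_name', check, 'file_name')
wf.connect(converter, 'converted_files', check, 'in_file')
wf.run()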
Example #28
    def workflow(self):

        #         self.datasource()
        datasource = self.data_source
        dict_sequences = self.dict_sequences
        nipype_cache = self.nipype_cache
        result_dir = self.result_dir
        sub_id = self.sub_id
        regex = self.regex
        roi_selection = self.roi_selection

        workflow = nipype.Workflow('rtstruct_extraction_workflow',
                                   base_dir=nipype_cache)
        datasink = nipype.Node(nipype.DataSink(base_directory=result_dir),
                               "datasink")
        substitutions = [('subid', sub_id)]
        substitutions += [('results/', '{}/'.format(self.workflow_name))]
        substitutions += [('_mha_convert/', '/')]

        rt_sessions = dict_sequences['RT']
        for key in rt_sessions:
            rt_files = rt_sessions[key]
            if rt_files['phy_dose'] is not None:
                dose_name = '{0}_phy_dose'.format(key)
            elif rt_files['rbe_dose'] is not None:
                dose_name = '{0}_rbe_dose'.format(key)
            elif rt_files['ot_dose'] is not None:
                dose_name = '{0}_ot_dose'.format(key)
            else:
                roi_selection = False

            if rt_files['rtct'] is not None and rt_files[
                    'rtstruct'] is not None:
                ss_convert = nipype.Node(interface=RTStructureCoverter(),
                                         name='ss_convert')
                mha_convert = nipype.Node(interface=MHA2NIIConverter(),
                                          name='mha_convert')

                if roi_selection:
                    select = nipype.Node(interface=CheckRTStructures(),
                                         name='select_gtv')
                    workflow.connect(mha_convert, 'out_files', select, 'rois')
                    workflow.connect(datasource, dose_name, select,
                                     'dose_file')
                    workflow.connect(select, 'checked_roi', datasink,
                                     'results.subid.{}.@masks'.format(key))
                else:
                    workflow.connect(mha_convert, 'out_files', datasink,
                                     'results.subid.{}.@masks'.format(key))

                datasink.inputs.substitutions = substitutions

                workflow.connect(datasource, '{0}_rtct'.format(key),
                                 ss_convert, 'reference_ct')
                workflow.connect(datasource, '{0}_rtstruct'.format(key),
                                 ss_convert, 'input_ss')
                workflow.connect(ss_convert, 'out_structures', mha_convert,
                                 'input_folder')
            else:
                print('No RTCT or RTSTRUCT found for session {}, '
                      'skipping structure conversion.'.format(key))

#         if datasource is not None:
#
#             workflow = nipype.Workflow('rtstruct_extraction_workflow', base_dir=nipype_cache)
#
#             datasink = nipype.Node(nipype.DataSink(base_directory=result_dir), "datasink")
#             substitutions = [('subid', sub_id)]
#             substitutions += [('results/', '{}/'.format(self.workflow_name))]
#
#             ss_convert = nipype.MapNode(interface=RTStructureCoverter(),
#                                        iterfield=['reference_ct', 'input_ss'],
#                                        name='ss_convert')
#             mha_convert = nipype.MapNode(interface=MHA2NIIConverter(),
#                                          iterfield=['input_folder'],
#                                          name='mha_convert')
#
#             if roi_selection:
#                 select = nipype.MapNode(interface=CheckRTStructures(),
#                                         iterfield=['rois', 'dose_file'],
#                                         name='select_gtv')
#                 workflow.connect(mha_convert, 'out_files', select, 'rois')
#                 workflow.connect(datasource, 'rt_dose', select, 'dose_file')
#                 workflow.connect(select, 'checked_roi', datasink,
#                                  'results.subid.@masks')
#             else:
#                 workflow.connect(mha_convert, 'out_files', datasink,
#                                  'results.subid.@masks')
#
#             for i, session in enumerate(self.rt['session']):
#                 substitutions += [(('_select_gtv{}/'.format(i), session+'/'))]
#                 substitutions += [(('_voxelizer{}/'.format(i), session+'/'))]
#                 substitutions += [(('_mha_convert{}/'.format(i), session+'/'))]
#
#             datasink.inputs.substitutions =substitutions
#
#             workflow.connect(datasource, 'rtct_nifti', ss_convert, 'reference_ct')
#             workflow.connect(datasource, 'rts_dcm', ss_convert, 'input_ss')
#             workflow.connect(ss_convert, 'out_structures', mha_convert, 'input_folder')
#
#             workflow = self.datasink(workflow, datasink)
#         else:
#             workflow = nipype.Workflow('rtstruct_extraction_workflow', base_dir=nipype_cache)

        return workflow
Example #29
    CLEAN_CACHE = ARGS.clean_cache

    sequences = ['t1', 'ct1', 't2', 'flair']
    sub_list = os.listdir(BASE_DIR)

    if not os.path.isdir(WORKFLOW_CACHE):
        os.makedirs(WORKFLOW_CACHE)

    for sub_id in sub_list:
        NIPYPE_CACHE = os.path.join(NIPYPE_CACHE_BASE, sub_id)

        sessions = [x for x in os.listdir(os.path.join(BASE_DIR, sub_id))
                    if 'REF' not in x and 'T10' not in x and 'RT_' not in x]
        datasource = nipype.Node(
            interface=nipype.DataGrabber(
                infields=['sub_id', 'sessions', 'ref_ct', 'ref_t1'],
                outfields=['t1', 'ct1', 't2', 'flair', 'reference', 't1_0']),
                name='datasource')
        datasource.inputs.base_directory = BASE_DIR
        datasource.inputs.template = '*'
        datasource.inputs.sort_filelist = True
        datasource.inputs.raise_on_empty = False
        datasource.inputs.field_template = dict(t1='%s/%s/T1.nii.gz', ct1='%s/%s/CT1.nii.gz',
                                                t2='%s/%s/T2.nii.gz', flair='%s/%s/FLAIR.nii.gz',
                                                reference='%s/%s/CT.nii.gz',
                                                t1_0='%s/%s/T1.nii.gz')
        datasource.inputs.template_args = dict(t1=[['sub_id', 'sessions']],
                                               ct1=[['sub_id', 'sessions']],
                                               t2=[['sub_id', 'sessions']],
                                               flair=[['sub_id', 'sessions']],
                                               reference=[['sub_id', 'ref_ct']],