Example #1
def anatomical_preprocessing():
    '''
    Inputs:
        MP2RAGE image, skull-stripped with SPECTRE (2010)

    Workflow:
        1. reorient to RPI
        2. create a brain mask

    Returns:
        brain
        brain_mask

    '''
    # assumed imports (standard nipype; the original source imports these at module level)
    import nipype.interfaces.utility as util
    import nipype.interfaces.fsl as fsl
    from nipype.interfaces.afni import preprocess
    from nipype.pipeline.engine import Node, Workflow

    # define workflow
    flow = Workflow('anat_preprocess')
    inputnode = Node(util.IdentityInterface(
        fields=['anat', 'anat_gm', 'anat_wm', 'anat_csf', 'anat_first']),
                     name='inputnode')
    outputnode = Node(util.IdentityInterface(fields=[
        'brain',
        'brain_gm',
        'brain_wm',
        'brain_csf',
        'brain_first',
        'brain_mask',
    ]),
                      name='outputnode')

    reorient = Node(interface=preprocess.Resample(), name='anat_reorient')
    reorient.inputs.orientation = 'RPI'
    reorient.inputs.outputtype = 'NIFTI'

    erode = Node(interface=fsl.ErodeImage(), name='anat_preproc')

    reorient_gm = reorient.clone('anat_preproc_gm')
    reorient_wm = reorient.clone('anat_preproc_wm')
    reorient_cm = reorient.clone('anat_preproc_csf')
    reorient_first = reorient.clone('anat_preproc_first')

    make_mask = Node(interface=fsl.UnaryMaths(), name='anat_preproc_mask')
    make_mask.inputs.operation = 'bin'

    # connect workflow nodes
    flow.connect(inputnode, 'anat', reorient, 'in_file')
    flow.connect(inputnode, 'anat_gm', reorient_gm, 'in_file')
    flow.connect(inputnode, 'anat_wm', reorient_wm, 'in_file')
    flow.connect(inputnode, 'anat_csf', reorient_cm, 'in_file')
    flow.connect(inputnode, 'anat_first', reorient_first, 'in_file')
    flow.connect(reorient, 'out_file', erode, 'in_file')
    flow.connect(erode, 'out_file', make_mask, 'in_file')
    flow.connect(make_mask, 'out_file', outputnode, 'brain_mask')

    flow.connect(erode, 'out_file', outputnode, 'brain')
    flow.connect(reorient_gm, 'out_file', outputnode, 'brain_gm')
    flow.connect(reorient_wm, 'out_file', outputnode, 'brain_wm')
    flow.connect(reorient_cm, 'out_file', outputnode, 'brain_csf')
    flow.connect(reorient_first, 'out_file', outputnode, 'brain_first')

    return flow
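A minimal usage sketch for the workflow above; the paths and base_dir are hypothetical placeholders, not part of the original source:

anat_wf = anatomical_preprocessing()
anat_wf.base_dir = '/tmp/work'  # hypothetical working directory
anat_wf.inputs.inputnode.anat = '/data/sub01/anat_brain.nii.gz'  # hypothetical inputs
anat_wf.inputs.inputnode.anat_gm = '/data/sub01/anat_gm.nii.gz'
anat_wf.inputs.inputnode.anat_wm = '/data/sub01/anat_wm.nii.gz'
anat_wf.inputs.inputnode.anat_csf = '/data/sub01/anat_csf.nii.gz'
anat_wf.inputs.inputnode.anat_first = '/data/sub01/anat_first.nii.gz'
anat_wf.run()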
Example #3
def make_w_masking():

    # assumed imports (standard nipype; the original source imports these at module level)
    from nipype.pipeline.engine import Node, Workflow
    from nipype.interfaces.utility import IdentityInterface
    from nipype.interfaces.afni import Automask
    from nipype.interfaces.fsl import BinaryMaths

    n_in = Node(IdentityInterface(fields=[
        'func',
        'fmap',  # mean
        ]), name='input')

    n_out = Node(IdentityInterface(fields=[
        'func',
        'fmap',  # mean
        ]), name='output')

    n_mask_func = Node(interface=Automask(), name='mask_func')
    n_mask_func.inputs.clfrac = 0.4
    n_mask_func.inputs.dilate = 4
    n_mask_func.inputs.args = '-nbhrs 15'
    n_mask_func.inputs.outputtype = 'NIFTI'

    n_mask_fmap = n_mask_func.clone('mask_fmap')

    n_mul = Node(interface=BinaryMaths(), name='mul')
    n_mul.inputs.operation = 'mul'

    n_masking = Node(interface=BinaryMaths(), name='masking')
    n_masking.inputs.operation = 'mul'

    n_masking_fmap = Node(interface=BinaryMaths(), name='masking_fmap')
    n_masking_fmap.inputs.operation = 'mul'

    w = Workflow('masking')

    w.connect(n_in, 'func', n_mask_func, 'in_file')
    w.connect(n_in, 'fmap', n_mask_fmap, 'in_file')
    w.connect(n_mask_fmap, 'out_file', n_mul, 'in_file')
    w.connect(n_mask_func, 'out_file', n_mul, 'operand_file')
    w.connect(n_in, 'func', n_masking, 'in_file')
    w.connect(n_mul, 'out_file', n_masking, 'operand_file')
    w.connect(n_masking, 'out_file', n_out, 'func')

    w.connect(n_in, 'fmap', n_masking_fmap, 'in_file')
    w.connect(n_mul, 'out_file', n_masking_fmap, 'operand_file')

    w.connect(n_masking_fmap, 'out_file', n_out, 'fmap')

    return w
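A sketch of how this masking subworkflow could be wired into a parent workflow; the surrounding nodes (n_moco, n_fmap_mean, n_smooth) and their port names are hypothetical:

from nipype.pipeline.engine import Workflow

main = Workflow('main')
masking = make_w_masking()
# ports on a subworkflow are addressed as '<nodename>.<field>'
main.connect(n_moco, 'out_file', masking, 'input.func')
main.connect(n_fmap_mean, 'out_file', masking, 'input.fmap')
main.connect(masking, 'output.func', n_smooth, 'in_file')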
Example #4
FA_to_WAX_Temp.inputs.write_composite_transform = True
FA_to_WAX_Temp.inputs.verbose = True
FA_to_WAX_Temp.inputs.output_warped_image = True
FA_to_WAX_Temp.inputs.float = True

#>>>>>>>>>>>>>>>>>>>>>>>>>MD
antsApplyMD_WAX = Node(ants.ApplyTransforms(), name='antsApplyMD_WAX')
antsApplyMD_WAX.inputs.dimension = 3
antsApplyMD_WAX.inputs.input_image_type = 3
antsApplyMD_WAX.inputs.num_threads = 1
antsApplyMD_WAX.inputs.float = True
antsApplyMD_WAX.inputs.output_image = 'MD_{subject_id}.nii'
antsApplyMD_WAX.inputs.reference_image = Wax_FA_Template

#>>>>>>>>>>>>>>>>>>>>>>>>>AD
antsApplyAD_WAX = antsApplyMD_WAX.clone(name='antsApplyAD_WAX')
antsApplyAD_WAX.inputs.output_image = 'AD_{subject_id}.nii'

#>>>>>>>>>>>>>>>>>>>>>>>>>RD
antsApplyRD_WAX = antsApplyMD_WAX.clone(name='antsApplyRD_WAX')
antsApplyRD_WAX.inputs.output_image = 'RD_{subject_id}.nii'

#>>>>>>>>>>>>>>>>>>>>>>>>>KA
antsApplyKA_WAX = antsApplyMD_WAX.clone(name='antsApplyKA_WAX')
antsApplyKA_WAX.inputs.output_image = 'KA_{subject_id}.nii'

#>>>>>>>>>>>>>>>>>>>>>>>>>AK
antsApplyAK_WAX = antsApplyMD_WAX.clone(name='antsApplyAK_WAX')
antsApplyAK_WAX.inputs.output_image = 'AK_{subject_id}.nii'

#>>>>>>>>>>>>>>>>>>>>>>>>>MK
antsApplyMK_WAX = antsApplyMD_WAX.clone(name='antsApplyMK_WAX')
antsApplyMK_WAX.inputs.output_image = 'MK_{subject_id}.nii'
Example #5
FA_to_Study_Temp.inputs.write_composite_transform = True
FA_to_Study_Temp.inputs.verbose = True
FA_to_Study_Temp.inputs.output_warped_image = True
FA_to_Study_Temp.inputs.float = True

#>>>>>>>>>>>>>>>>>>>>>>>>>>>AK
antsApply_AK_Study = Node(ants.ApplyTransforms(), name='antsApply_AK_Study')
antsApply_AK_Study.inputs.dimension = 3
antsApply_AK_Study.inputs.input_image_type = 3
antsApply_AK_Study.inputs.num_threads = 1
antsApply_AK_Study.inputs.float = True
antsApply_AK_Study.inputs.output_image = 'DKI_ExploreDTI_AK.nii'
antsApply_AK_Study.inputs.reference_image = Study_Template

#>>>>>>>>>>>>>>>>>>>>>>>>>AWF
antsApply_AWF_Study = antsApply_AK_Study.clone(name='antsApply_AWF_Study')
antsApply_AWF_Study.inputs.output_image = 'DKI_ExploreDTI_AWF.nii'

#>>>>>>>>>>>>>>>>>>>>>>>>>KA
antsApply_KA_Study = antsApply_AK_Study.clone(name='antsApply_KA_Study')
antsApply_KA_Study.inputs.output_image = 'DKI_ExploreDTI_KA.nii'

#>>>>>>>>>>>>>>>>>>>>>>>>>AD
antsApply_AD_Study = antsApply_AK_Study.clone(name='antsApply_AD_Study')
antsApply_AD_Study.inputs.output_image = 'DKI_ExploreDTI_AD.nii'

#>>>>>>>>>>>>>>>>>>>>>>>>>MD
antsApply_MD_Study = antsApply_AK_Study.clone(name='antsApply_MD_Study')
antsApply_MD_Study.inputs.output_image = 'DKI_ExploreDTI_MD.nii'

#>>>>>>>>>>>>>>>>>>>>>>>>>MK
antsApply_MK_Study = antsApply_AK_Study.clone(name='antsApply_MK_Study')
antsApply_MK_Study.inputs.output_image = 'DKI_ExploreDTI_MK.nii'
Example #6
def learning_predict_data_2samp_wf(working_dir,
                                   ds_dir,
                                   in_data_name_list,
                                   subjects_selection_crit_dict,
                                   subjects_selection_crit_names_list,
                                   aggregated_subjects_dir,
                                   target_list,
                                   use_n_procs,
                                   plugin_name,
                                   confound_regression=[False, True],
                                   run_cv=False,
                                   n_jobs_cv=1,
                                   run_tuning=False,
                                   run_2sample_training=False,
                                   aggregated_subjects_dir_nki=None,
                                   subjects_selection_crit_dict_nki=None,
                                   subjects_selection_crit_name_nki=None,
                                   reverse_split=False,
                                   random_state_nki=666,
                                   run_learning_curve=False,
                                   life_test_size=0.5):
    import os
    from nipype import config
    from nipype.pipeline.engine import Node, Workflow
    import nipype.interfaces.utility as util
    import nipype.interfaces.io as nio
    from itertools import chain
    from learning_utils import aggregate_multimodal_metrics_fct, run_prediction_split_fct, \
        backproject_and_split_weights_fct, select_subjects_fct, select_multimodal_X_fct, learning_curve_plot
    import pandas as pd

    ###############################################################################################################
    # GENERAL SETTINGS

    wf = Workflow(name='learning_predict_data_2samp_wf')
    wf.base_dir = os.path.join(working_dir)

    nipype_cfg = dict(logging=dict(workflow_level='DEBUG'),
                      execution={
                          'stop_on_first_crash': False,
                          'remove_unnecessary_outputs': False,
                          'job_finished_timeout': 120
                      })
    config.update_config(nipype_cfg)
    wf.config['execution']['crashdump_dir'] = os.path.join(
        working_dir, 'crash')

    ds = Node(nio.DataSink(), name='ds')
    ds.inputs.base_directory = os.path.join(ds_dir,
                                            'group_learning_prepare_data')

    ds.inputs.regexp_substitutions = [
        # ('subject_id_', ''),
        ('_parcellation_', ''),
        ('_bp_freqs_', 'bp_'),
        ('_extraction_method_', ''),
        ('_subject_id_[A0-9]*/', '')
    ]
    ds_pdf = Node(nio.DataSink(), name='ds_pdf')
    ds_pdf.inputs.base_directory = os.path.join(ds_dir, 'pdfs')
    ds_pdf.inputs.parameterization = False

    ###############################################################################################################
    # ensure in_data_name_list is list of lists
    in_data_name_list = [
        i if type(i) == list else [i] for i in in_data_name_list
    ]
    in_data_name_list_unique = list(set(
        chain.from_iterable(in_data_name_list)))

    ###############################################################################################################
    # SET ITERATORS

    in_data_name_infosource = Node(
        util.IdentityInterface(fields=['in_data_name']),
        name='in_data_name_infosource')
    in_data_name_infosource.iterables = ('in_data_name',
                                         in_data_name_list_unique)

    multimodal_in_data_name_infosource = Node(
        util.IdentityInterface(fields=['multimodal_in_data_name']),
        name='multimodal_in_data_name_infosource')
    multimodal_in_data_name_infosource.iterables = ('multimodal_in_data_name',
                                                    in_data_name_list)

    subject_selection_infosource = Node(
        util.IdentityInterface(fields=['selection_criterium']),
        name='subject_selection_infosource')
    subject_selection_infosource.iterables = (
        'selection_criterium', subjects_selection_crit_names_list)

    target_infosource = Node(util.IdentityInterface(fields=['target_name']),
                             name='target_infosource')
    target_infosource.iterables = ('target_name', target_list)

    ###############################################################################################################
    # COMPILE LIFE DATA
    ###############################################################################################################

    ###############################################################################################################
    # GET INFO AND SELECT FILES
    df_all_subjects_pickle_file = os.path.join(
        aggregated_subjects_dir, 'df_all_subjects_pickle_file/df_all.pkl')
    df = pd.read_pickle(df_all_subjects_pickle_file)

    # build lookup dict for unimodal data
    X_file_template = 'X_file/_in_data_name_{in_data_name}/vectorized_aggregated_data.npy'
    info_file_template = 'unimodal_backprojection_info_file/_in_data_name_{in_data_name}/unimodal_backprojection_info.pkl'
    unimodal_lookup_dict = {}
    for k in in_data_name_list_unique:
        unimodal_lookup_dict[k] = {
            'X_file':
            os.path.join(aggregated_subjects_dir,
                         X_file_template.format(in_data_name=k)),
            'unimodal_backprojection_info_file':
            os.path.join(aggregated_subjects_dir,
                         info_file_template.format(in_data_name=k))
        }

    ###############################################################################################################
    # AGGREGATE MULTIMODAL METRICS
    # stack single modality arrays horizontally
    aggregate_multimodal_metrics = Node(util.Function(
        input_names=['multimodal_list', 'unimodal_lookup_dict'],
        output_names=[
            'X_multimodal_file', 'multimodal_backprojection_info',
            'multimodal_name'
        ],
        function=aggregate_multimodal_metrics_fct),
                                        name='aggregate_multimodal_metrics')
    wf.connect(multimodal_in_data_name_infosource, 'multimodal_in_data_name',
               aggregate_multimodal_metrics, 'multimodal_list')
    aggregate_multimodal_metrics.inputs.unimodal_lookup_dict = unimodal_lookup_dict

    ###############################################################################################################
    # GET INDEXER FOR SUBJECTS OF INTEREST (as defined by selection criterium)
    select_subjects = Node(util.Function(input_names=[
        'df_all_subjects_pickle_file', 'subjects_selection_crit_dict',
        'selection_criterium'
    ],
                                         output_names=[
                                             'df_use_file',
                                             'df_use_pickle_file',
                                             'subjects_selection_index'
                                         ],
                                         function=select_subjects_fct),
                           name='select_subjects')

    select_subjects.inputs.df_all_subjects_pickle_file = df_all_subjects_pickle_file
    select_subjects.inputs.subjects_selection_crit_dict = subjects_selection_crit_dict
    wf.connect(subject_selection_infosource, 'selection_criterium',
               select_subjects, 'selection_criterium')

    ###############################################################################################################
    # SELECT MULTIMODAL X
    # select subjects (rows) from multimodal X according to the indexer
    select_multimodal_X = Node(util.Function(
        input_names=[
            'X_multimodal_file', 'subjects_selection_index',
            'selection_criterium'
        ],
        output_names=['X_multimodal_selected_file'],
        function=select_multimodal_X_fct),
                               name='select_multimodal_X')
    wf.connect(aggregate_multimodal_metrics, 'X_multimodal_file',
               select_multimodal_X, 'X_multimodal_file')
    wf.connect(select_subjects, 'subjects_selection_index',
               select_multimodal_X, 'subjects_selection_index')

    ###############################################################################################################
    # COMPILE NKI DATA
    ###############################################################################################################
    if run_2sample_training:

        ###############################################################################################################
        # GET INFO AND SELECT FILES
        df_all_subjects_pickle_file_nki = os.path.join(
            aggregated_subjects_dir_nki,
            'df_all_subjects_pickle_file/df_all.pkl')
        df_nki = pd.read_pickle(df_all_subjects_pickle_file_nki)

        # build lookup dict for unimodal data
        X_file_template = 'X_file/_in_data_name_{in_data_name}/vectorized_aggregated_data.npy'
        info_file_template = 'unimodal_backprojection_info_file/_in_data_name_{in_data_name}/unimodal_backprojection_info.pkl'
        unimodal_lookup_dict_nki = {}
        for k in in_data_name_list_unique:
            unimodal_lookup_dict_nki[k] = {
                'X_file':
                os.path.join(aggregated_subjects_dir_nki,
                             X_file_template.format(in_data_name=k)),
                'unimodal_backprojection_info_file':
                os.path.join(aggregated_subjects_dir_nki,
                             info_file_template.format(in_data_name=k))
            }

        ###############################################################################################################
        # AGGREGATE MULTIMODAL METRICS
        # stack single modality arrays horizontally
        aggregate_multimodal_metrics_nki = Node(
            util.Function(
                input_names=['multimodal_list', 'unimodal_lookup_dict'],
                output_names=[
                    'X_multimodal_file', 'multimodal_backprojection_info',
                    'multimodal_name'
                ],
                function=aggregate_multimodal_metrics_fct),
            name='aggregate_multimodal_metrics_nki')
        wf.connect(multimodal_in_data_name_infosource,
                   'multimodal_in_data_name', aggregate_multimodal_metrics_nki,
                   'multimodal_list')
        aggregate_multimodal_metrics_nki.inputs.unimodal_lookup_dict = unimodal_lookup_dict_nki

        ###############################################################################################################
        # GET INDEXER FOR SUBJECTS OF INTEREST (as defined by selection criterium)
        select_subjects_nki = Node(util.Function(input_names=[
            'df_all_subjects_pickle_file', 'subjects_selection_crit_dict',
            'selection_criterium'
        ],
                                                 output_names=[
                                                     'df_use_file',
                                                     'df_use_pickle_file',
                                                     'subjects_selection_index'
                                                 ],
                                                 function=select_subjects_fct),
                                   name='select_subjects_nki')

        select_subjects_nki.inputs.df_all_subjects_pickle_file = df_all_subjects_pickle_file_nki
        select_subjects_nki.inputs.subjects_selection_crit_dict = subjects_selection_crit_dict_nki
        select_subjects_nki.inputs.selection_criterium = subjects_selection_crit_name_nki

        ###############################################################################################################
        # SELECT MULTIMODAL X
        # select subjects (rows) from multimodal X according to the indexer
        select_multimodal_X_nki = Node(util.Function(
            input_names=[
                'X_multimodal_file', 'subjects_selection_index',
                'selection_criterium'
            ],
            output_names=['X_multimodal_selected_file'],
            function=select_multimodal_X_fct),
                                       name='select_multimodal_X_nki')
        wf.connect(aggregate_multimodal_metrics_nki, 'X_multimodal_file',
                   select_multimodal_X_nki, 'X_multimodal_file')
        wf.connect(select_subjects_nki, 'subjects_selection_index',
                   select_multimodal_X_nki, 'subjects_selection_index')

    ###############################################################################################################
    # RUN PREDICTION
    #
    prediction_node_dict = {}
    backprojection_node_dict = {}

    prediction_split = Node(util.Function(
        input_names=[
            'X_file', 'target_name', 'selection_criterium', 'df_file',
            'data_str', 'regress_confounds', 'run_cv', 'n_jobs_cv',
            'run_tuning', 'X_file_nki', 'df_file_nki', 'reverse_split',
            'random_state_nki', 'run_learning_curve', 'life_test_size'
        ],
        output_names=[
            'scatter_file', 'brain_age_scatter_file', 'df_life_out_file',
            'df_nki_out_file', 'df_big_out_file', 'model_out_file',
            'df_res_out_file', 'tuning_curve_file', 'scatter_file_cv',
            'learning_curve_plot_file', 'learning_curve_df_file'
        ],
        function=run_prediction_split_fct),
                            name='prediction_split')

    backproject_and_split_weights = Node(util.Function(
        input_names=[
            'trained_model_file', 'multimodal_backprojection_info', 'data_str',
            'target_name'
        ],
        output_names=['out_file_list', 'out_file_render_list'],
        function=backproject_and_split_weights_fct),
                                         name='backproject_and_split_weights')

    i = 0

    for reg in confound_regression:
        the_out_node_str = 'single_source_model_reg_%s_' % (reg)
        prediction_node_dict[i] = prediction_split.clone(the_out_node_str)
        the_in_node = prediction_node_dict[i]
        the_in_node.inputs.regress_confounds = reg
        the_in_node.inputs.run_cv = run_cv
        the_in_node.inputs.n_jobs_cv = n_jobs_cv
        the_in_node.inputs.run_tuning = run_tuning
        the_in_node.inputs.reverse_split = reverse_split
        the_in_node.inputs.random_state_nki = random_state_nki
        the_in_node.inputs.run_learning_curve = run_learning_curve
        the_in_node.inputs.life_test_size = life_test_size

        wf.connect(select_multimodal_X, 'X_multimodal_selected_file',
                   the_in_node, 'X_file')
        wf.connect(target_infosource, 'target_name', the_in_node,
                   'target_name')
        wf.connect(subject_selection_infosource, 'selection_criterium',
                   the_in_node, 'selection_criterium')
        wf.connect(select_subjects, 'df_use_pickle_file', the_in_node,
                   'df_file')
        wf.connect(aggregate_multimodal_metrics, 'multimodal_name',
                   the_in_node, 'data_str')

        wf.connect(the_in_node, 'model_out_file', ds,
                   the_out_node_str + 'trained_model')
        wf.connect(the_in_node, 'scatter_file', ds_pdf,
                   the_out_node_str + 'scatter')
        wf.connect(the_in_node, 'brain_age_scatter_file', ds_pdf,
                   the_out_node_str + 'brain_age_scatter')
        wf.connect(the_in_node, 'df_life_out_file', ds_pdf,
                   the_out_node_str + 'predicted_life')
        wf.connect(the_in_node, 'df_nki_out_file', ds_pdf,
                   the_out_node_str + 'predicted_nki')
        wf.connect(the_in_node, 'df_big_out_file', ds_pdf,
                   the_out_node_str + 'predicted')

        wf.connect(the_in_node, 'df_res_out_file', ds_pdf,
                   the_out_node_str + 'results_error')
        wf.connect(the_in_node, 'tuning_curve_file', ds_pdf,
                   the_out_node_str + 'tuning_curve')
        wf.connect(the_in_node, 'scatter_file_cv', ds_pdf,
                   the_out_node_str + 'scatter_cv')
        wf.connect(the_in_node, 'learning_curve_plot_file', ds_pdf,
                   the_out_node_str + 'learning_curve_plot_file.@plot')
        wf.connect(the_in_node, 'learning_curve_df_file', ds_pdf,
                   the_out_node_str + 'learning_curve_df_file.@df')

        # NKI
        if run_2sample_training:
            wf.connect(select_multimodal_X_nki, 'X_multimodal_selected_file',
                       the_in_node, 'X_file_nki')
            wf.connect(select_subjects_nki, 'df_use_pickle_file', the_in_node,
                       'df_file_nki')

        else:
            the_in_node.inputs.df_file_nki = None
            the_in_node.inputs.X_file_nki = None

        # BACKPROJECT PREDICTION WEIGHTS
        # map weights back to single modality original format (e.g., nifti or matrix)
        the_out_node_str = 'backprojection_single_source_model_reg_%s_' % (reg)
        backprojection_node_dict[i] = backproject_and_split_weights.clone(
            the_out_node_str)
        the_from_node = prediction_node_dict[i]
        the_in_node = backprojection_node_dict[i]
        wf.connect(the_from_node, 'model_out_file', the_in_node,
                   'trained_model_file')
        wf.connect(aggregate_multimodal_metrics,
                   'multimodal_backprojection_info', the_in_node,
                   'multimodal_backprojection_info')
        wf.connect(aggregate_multimodal_metrics, 'multimodal_name',
                   the_in_node, 'data_str')
        wf.connect(target_infosource, 'target_name', the_in_node,
                   'target_name')

        wf.connect(the_in_node, 'out_file_list', ds_pdf,
                   the_out_node_str + '.@weights')
        wf.connect(the_in_node, 'out_file_render_list', ds_pdf,
                   the_out_node_str + 'renders.@renders')

        i += 1

    ###############################################################################################################
    # RUN WF
    wf.write_graph(dotfilename=wf.name, graph2use='colored',
                   format='pdf')  # 'hierarchical')
    wf.write_graph(dotfilename=wf.name, graph2use='orig', format='pdf')
    wf.write_graph(dotfilename=wf.name, graph2use='flat', format='pdf')

    if plugin_name == 'CondorDAGMan':
        wf.run(plugin=plugin_name)
    if plugin_name == 'MultiProc':
        wf.run(plugin=plugin_name, plugin_args={'n_procs': use_n_procs})
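A hedged call sketch for this workflow builder; every argument value below is a hypothetical placeholder:

learning_predict_data_2samp_wf(
    working_dir='/scr/working_dir',                   # hypothetical
    ds_dir='/scr/results',                            # hypothetical
    in_data_name_list=[['alff'], ['alff', 'falff']],  # hypothetical metric names
    subjects_selection_crit_dict={'adults': ["df.age >= 18"]},
    subjects_selection_crit_names_list=['adults'],
    aggregated_subjects_dir='/scr/aggregated',        # hypothetical
    target_list=['age'],
    use_n_procs=4,
    plugin_name='MultiProc')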
Example #7
morph_closing = Node(fs.Binarize(min=0.5,
                                 dilate=10,
                                 erode=10),
                     name='morph_close')

medwall.connect([(addmasks, morph_closing, [('out_file', 'in_file')])])



'''alternative with thickness'''
wallmask_rh = Node(fs.Binarize(max=0.2,
                               out_type='nii.gz'),
                   name='wallmask_rh')

wallmask_lh = wallmask_rh.clone('wallmask_lh')

medwall.connect([(selectfiles, wallmask_rh, [('thickness_rh', 'in_file')]),
                 (selectfiles, wallmask_lh, [('thickness_lh', 'in_file')])])

addmasks2 = Node(fsl.BinaryMaths(operation='add'),
                 name='addmasks2')

medwall.connect([(wallmask_rh, addmasks2, [('binary_file', 'in_file')]),
                 (wallmask_lh, addmasks2, [('binary_file', 'operand_file')])])

'''
followed by
3dclust -savemask $out 0 20 $in
'''
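The closing comment hands the added mask off to AFNI's 3dclust. A sketch of scripting that step with nipype's generic CommandLine wrapper, assuming 3dclust is on the PATH; both file names are placeholders:

from nipype.interfaces.base import CommandLine

# mirrors the '3dclust -savemask $out 0 20 $in' call from the comment above
clust = CommandLine(command='3dclust',
                    args='-savemask medwall_clustered.nii.gz 0 20 addmasks2_out.nii.gz')
clust.run()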
Example #8
def func2anat_linear():

    # assumed imports (standard nipype; Node, Workflow and util are used below)
    import nipype.interfaces.fsl as fsl
    import nipype.interfaces.utility as util
    from nipype.pipeline.engine import Node, Workflow

    # BBR schedule shipped with a stock FSL 5.0 install
    bbr_schedule = '/usr/share/fsl/5.0/etc/flirtsch/bbr.sch'

    # define workflow
    linear = Workflow('func2anat_linear')

    inputnode  = Node(util.IdentityInterface(fields=['func_image',
                                                     'func_mask',
                                                     'reference_image',
                                                     'anat_wm',
                                                     'anat_csf',
                                                     'anat_gm',
                                                     'anat_first',]),
                     name = 'inputnode')

    outputnode = Node(util.IdentityInterface(fields=['func2anat',
                                                     'func2anat_xfm',
                                                     'anat_downsample',
                                                     'anat2func_xfm',
                                                     'anat2func',
                                                     'func_gm',
                                                     'func_wm',
                                                     'func_csf',
                                                     'func_first']),
                     name= 'outputnode')

    anatdownsample                      = Node(interface= fsl.FLIRT(), name = 'downsample_anat')
    anatdownsample.inputs.apply_isoxfm  = 2.3
    anatdownsample.inputs.datatype      = 'float'

    # run flirt with mutual info
    mutual_info              = Node(interface= fsl.FLIRT(), name = 'func2anat_flirt0_mutualinfo')
    mutual_info.inputs.cost  = 'mutualinfo'
    mutual_info.inputs.dof   = 6
    mutual_info.inputs.no_resample   = True

    # run flirt boundary based registration on a func_moco_disco using
    # (a) white matter segment as a boundary and (b) the mutualinfo xfm for initialization
    bbr                      = Node(interface= fsl.FLIRT(), name = 'func2anat_flirt1_bbr')
    bbr.inputs.cost          = 'bbr'
    bbr.inputs.dof           = 6
    bbr.inputs.schedule      = bbr_schedule
    bbr.inputs.no_resample   = True

    convert_xfm                    = Node(interface= fsl.ConvertXFM(), name ='anat2func_xfm')
    convert_xfm.inputs.invert_xfm  = True

    #connect nodes
    linear.connect(inputnode          , 'reference_image'     ,    anatdownsample  , 'in_file'    )
    linear.connect(inputnode          , 'reference_image'     ,    anatdownsample  , 'reference'  )
    linear.connect(inputnode          , 'func_image'          ,    mutual_info     , 'in_file'    )
    linear.connect(anatdownsample     , 'out_file'            ,    mutual_info     , 'reference'  )
    linear.connect(inputnode          , 'func_image'          ,    bbr             , 'in_file'         )
    linear.connect(anatdownsample     , 'out_file'            ,    bbr             , 'reference'       )
    linear.connect(inputnode          , 'anat_wm'             ,    bbr             , 'wm_seg'          )
    linear.connect(mutual_info        , 'out_matrix_file'     ,    bbr             , 'in_matrix_file'  )
    linear.connect(bbr                , 'out_matrix_file'     ,    convert_xfm     , 'in_file'         )
    linear.connect(bbr                , 'out_file'            ,    outputnode      , 'func2anat'       )
    linear.connect(bbr                , 'out_matrix_file'     ,    outputnode      , 'func2anat_xfm'   )
    linear.connect(convert_xfm        , 'out_file'            ,    outputnode      , 'anat2func_xfm'   )
    linear.connect(anatdownsample     , 'out_file'            ,    outputnode      , 'anat_downsample' )

    anat_invxfm                = Node(interface= fsl.ApplyXfm(), name ='apply_invxfm_anat')
    anat_invxfm.inputs.apply_xfm = True

    linear.connect(anatdownsample     , 'out_file'            ,    anat_invxfm, 'in_file')
    linear.connect(inputnode          , 'func_image'          ,    anat_invxfm, 'reference')
    linear.connect(convert_xfm        , 'out_file'            ,    anat_invxfm, 'in_matrix_file')
    linear.connect(anat_invxfm        , 'out_file'            ,    outputnode, 'anat2func')

    # flirt tissue masks back to func space
    gm_invxfm                = Node(interface= fsl.ApplyXfm(), name ='apply_invxfm_gm')
    gm_invxfm.inputs.apply_xfm = True
    bin_gm =  Node(interface= fsl.Threshold(), name ='apply_invxfm_gm_bin')
    bin_gm.inputs.thresh      = 0.5
    bin_gm.inputs.args        = '-bin'
    mask_gm = Node(interface=fsl.BinaryMaths(), name='func_gm')
    mask_gm.inputs.operation = 'mul'

    linear.connect(inputnode          , 'anat_gm'             ,    gm_invxfm, 'in_file')
    linear.connect(inputnode          , 'func_image'          ,    gm_invxfm, 'reference')
    linear.connect(convert_xfm        , 'out_file'            ,    gm_invxfm, 'in_matrix_file')
    linear.connect(gm_invxfm          , 'out_file'            ,    bin_gm,    'in_file')
    linear.connect(bin_gm             , 'out_file'            ,    mask_gm,   'in_file')
    linear.connect(inputnode          , 'func_mask'           ,    mask_gm,   'operand_file')
    linear.connect(mask_gm            , 'out_file'            ,    outputnode,'func_gm')

    wm_invxfm = gm_invxfm.clone('apply_invxfm_wm')
    bin_wm    = bin_gm.clone('apply_invxfm_wm_bin')
    mask_wm   = mask_gm.clone('func_wm')

    linear.connect(inputnode          , 'anat_wm'             ,    wm_invxfm, 'in_file')
    linear.connect(inputnode          , 'func_image'          ,    wm_invxfm, 'reference')
    linear.connect(convert_xfm        , 'out_file'            ,    wm_invxfm, 'in_matrix_file')
    linear.connect(wm_invxfm          , 'out_file'            ,    bin_wm,    'in_file')
    linear.connect(bin_wm             , 'out_file'            ,    mask_wm,   'in_file')
    linear.connect(inputnode          , 'func_mask'           ,    mask_wm,   'operand_file')
    linear.connect(mask_wm            , 'out_file'            ,    outputnode,'func_wm')

    cm_invxfm  = gm_invxfm.clone('apply_invxfm_csf')
    bin_cm     = bin_gm.clone('apply_invxfm_csf_bin')
    mask_cm    = mask_gm.clone('func_csf')

    linear.connect(inputnode          , 'anat_csf'            ,    cm_invxfm, 'in_file')
    linear.connect(inputnode          , 'func_image'          ,    cm_invxfm, 'reference')
    linear.connect(convert_xfm        , 'out_file'            ,    cm_invxfm, 'in_matrix_file')
    linear.connect(cm_invxfm          , 'out_file'            ,    bin_cm,    'in_file')
    linear.connect(bin_cm             , 'out_file'            ,    mask_cm,   'in_file')
    linear.connect(inputnode          , 'func_mask'           ,    mask_cm,   'operand_file')
    linear.connect(mask_cm            , 'out_file'            ,    outputnode,'func_csf')

    first_invxfm  = gm_invxfm.clone('apply_invxfm_first')
    bin_first =  Node(interface= fsl.Threshold(), name ='apply_invxfm_first_bin')
    bin_first.inputs.thresh      = 12
    bin_first.inputs.args        = '-bin'
    mask_first    = mask_gm.clone('func_first')

    linear.connect(inputnode          , 'anat_first'          ,    first_invxfm, 'in_file')
    linear.connect(inputnode          , 'func_image'          ,    first_invxfm, 'reference')
    linear.connect(convert_xfm        , 'out_file'            ,    first_invxfm, 'in_matrix_file')
    linear.connect(first_invxfm       , 'out_file'            ,    bin_first,    'in_file')
    linear.connect(bin_first          , 'out_file'            ,    mask_first,   'in_file')
    linear.connect(inputnode          , 'func_mask'           ,    mask_first,   'operand_file')
    linear.connect(mask_first         , 'out_file'            ,    outputnode,   'func_first')

    return linear
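A usage sketch for the registration workflow, with hypothetical file names:

reg = func2anat_linear()
reg.base_dir = '/tmp/work'  # hypothetical
reg.inputs.inputnode.func_image = 'func_moco_disco_mean.nii.gz'  # hypothetical inputs
reg.inputs.inputnode.func_mask = 'func_mask.nii.gz'
reg.inputs.inputnode.reference_image = 'anat_brain.nii.gz'
reg.inputs.inputnode.anat_wm = 'anat_wm.nii.gz'
reg.inputs.inputnode.anat_gm = 'anat_gm.nii.gz'
reg.inputs.inputnode.anat_csf = 'anat_csf.nii.gz'
reg.inputs.inputnode.anat_first = 'anat_first.nii.gz'
reg.run()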
Example #9
def run_tbss_wf(subject_id,
                working_dir,
                ds_dir,
                use_n_procs,
                plugin_name,
                dMRI_templates,
                in_path):
    import os
    from nipype import config
    from nipype.pipeline.engine import Node, Workflow, MapNode, JoinNode
    import nipype.interfaces.utility as util
    import nipype.interfaces.io as nio
    from nipype.interfaces import fsl, dipy

    from LeiCA.dMRI.nipype_11_workflows_dmri_fsl_snapshot.artifacts import hmc_pipeline, ecc_pipeline
    from LeiCA.dMRI.nipype_11_workflows_dmri_fsl_snapshot.utils import b0_average, extract_bval
    from LeiCA.dMRI.nipype_11_workflows_dmri_fsl_snapshot.tbss import create_tbss_all
    from nipype.workflows.dmri.dipy.denoise import nlmeans_pipeline
    from diffusion_utils import apply_hmc_and_ecc

    #####################################
    # GENERAL SETTINGS
    #####################################
    wf = Workflow(name='tbss_wf')
    wf.base_dir = os.path.join(working_dir)

    nipype_cfg = dict(logging=dict(workflow_level='DEBUG'), execution={'stop_on_first_crash': True,
                                                                       'remove_unnecessary_outputs': False,
                                                                       'job_finished_timeout': 15})
    config.update_config(nipype_cfg)
    wf.config['execution']['crashdump_dir'] = os.path.join(working_dir, 'crash')

    ds = Node(nio.DataSink(base_directory=ds_dir), name='ds')

    ds.inputs.regexp_substitutions = [
        ('_subject_id_[A0-9]*/', ''),
    ]


    # GET SUBJECT-SPECIFIC dMRI DATA
    selectfiles = Node(nio.SelectFiles(dMRI_templates, base_directory=in_path), name="selectfiles")
    selectfiles.inputs.subject_id = subject_id

    #####################################
    # WF
    #####################################


    def _bvals_with_nodiff_0_fct(in_bval, lowbval):
        ''' returns bval file with 0 in place of lowbvals
        '''
        import os
        import numpy as np
        bvals = np.loadtxt(in_bval)
        bvals[bvals <= lowbval] = 0
        bval_file_zero = os.path.abspath('bval_0.bval')
        np.savetxt(bval_file_zero, bvals)
        return bval_file_zero

    # CREATE BVALS FILES WITH 0 IN LOWBVAL SLICES. FOR ECC ONLY
    bvals_with_nodiff_0 = Node(util.Function(input_names=['in_bval', 'lowbval'],
                                             output_names=['bval_file_zero'],
                                             function=_bvals_with_nodiff_0_fct), name='bvals_with_nodiff_0')
    wf.connect(selectfiles, 'bval_file', bvals_with_nodiff_0, 'in_bval')
    bvals_with_nodiff_0.inputs.lowbval = 5

    ##
    # GET B0 MASK
    b0_4d_init_0 = Node(util.Function(input_names=['in_dwi', 'in_bval', 'b'], output_names=['out_file'],
                                      function=extract_bval), name='b0_4d_init_0')
    wf.connect(selectfiles, 'dMRI_data', b0_4d_init_0, 'in_dwi')
    #wf.connect(selectfiles, 'bval_file', b0_4d_init_0, 'in_bval')
    wf.connect(bvals_with_nodiff_0, 'bval_file_zero', b0_4d_init_0, 'in_bval')
    b0_4d_init_0.inputs.b = 'nodiff'

    first_b0 = Node(fsl.ExtractROI(t_min=0, t_size=1), name='first_b0')
    wf.connect(b0_4d_init_0, 'out_file', first_b0, 'in_file')

    flirt = Node(fsl.FLIRT(dof=6, out_file='b0_moco.nii.gz'), name='flirt')
    wf.connect(b0_4d_init_0, 'out_file', flirt, 'in_file')
    wf.connect(first_b0, 'roi_file', flirt, 'reference')

    mean_b0_moco_init_0 = Node(fsl.MeanImage(), name='mean_b0_moco_init_0')
    wf.connect(flirt, 'out_file', mean_b0_moco_init_0, 'in_file')

    b0_mask_init_0 = Node(fsl.BET(frac=0.3, mask=True, robust=True), name='b0_mask_init_0')
    wf.connect(mean_b0_moco_init_0, 'out_file', b0_mask_init_0, 'in_file')



    # HEAD MOTION CORRECTION PIPELINE
    hmc = hmc_pipeline()
    wf.connect(selectfiles, 'dMRI_data', hmc, 'inputnode.in_file')
    #wf.connect(selectfiles, 'bval_file', hmc, 'inputnode.in_bval')
    wf.connect(bvals_with_nodiff_0, 'bval_file_zero', hmc, 'inputnode.in_bval')
    wf.connect(selectfiles, 'bvec_file', hmc, 'inputnode.in_bvec')
    wf.connect(b0_mask_init_0, 'mask_file', hmc, 'inputnode.in_mask')
    hmc.inputs.inputnode.ref_num = 0

    wf.connect(hmc, 'outputnode.out_file', ds, 'moco')


    # GET UPDATED MEAN B0 AND MASK
    b0_4d_init_1 = b0_4d_init_0.clone('b0_4d_init_1')
    wf.connect(hmc, 'outputnode.out_file', b0_4d_init_1, 'in_dwi')
    #wf.connect(selectfiles, 'bval_file', b0_4d_init_1, 'in_bval')
    wf.connect(bvals_with_nodiff_0, 'bval_file_zero', b0_4d_init_1, 'in_bval')

    mean_b0_moco_init_1 = mean_b0_moco_init_0.clone('mean_b0_moco_init_1')
    wf.connect(b0_4d_init_1, 'out_file', mean_b0_moco_init_1, 'in_file')

    b0_mask_init_1 = b0_mask_init_0.clone('b0_mask_init_1')
    wf.connect(mean_b0_moco_init_1, 'out_file', b0_mask_init_1, 'in_file')


    # EDDY
    ecc = ecc_pipeline()
    wf.connect(selectfiles, 'dMRI_data', ecc, 'inputnode.in_file')
    #wf.connect(selectfiles, 'bval_file', ecc, 'inputnode.in_bval')
    wf.connect(bvals_with_nodiff_0, 'bval_file_zero', ecc, 'inputnode.in_bval')
    wf.connect(b0_mask_init_1, 'mask_file', ecc, 'inputnode.in_mask')
    wf.connect(hmc, 'outputnode.out_xfms', ecc, 'inputnode.in_xfms')

    wf.connect(ecc, 'outputnode.out_file', ds, 'ecc')


    combine_corrections = apply_hmc_and_ecc(name='combine_corrections')
    wf.connect(hmc, 'outputnode.out_xfms', combine_corrections, 'inputnode.in_hmc')
    wf.connect(ecc, 'outputnode.out_xfms', combine_corrections, 'inputnode.in_ecc')
    wf.connect(selectfiles, 'dMRI_data', combine_corrections, 'inputnode.in_dwi')

    wf.connect(combine_corrections, 'outputnode.out_file', ds, 'preprocessed')

    # GET UPDATED MEAN B0 AND MASK
    b0_4d = b0_4d_init_0.clone('b0_4d')
    wf.connect(combine_corrections, 'outputnode.out_file', b0_4d, 'in_dwi')
    #wf.connect(selectfiles, 'bval_file', b0_4d, 'in_bval')
    wf.connect(bvals_with_nodiff_0, 'bval_file_zero', b0_4d, 'in_bval')

    mean_b0_moco = mean_b0_moco_init_0.clone('mean_b0_moco')
    wf.connect(b0_4d, 'out_file', mean_b0_moco, 'in_file')

    b0_mask = b0_mask_init_0.clone('b0_mask')
    wf.connect(mean_b0_moco, 'out_file', b0_mask, 'in_file')

    # denoise = Node(dipy.Denoise(), name='denoise')
    # wf.connect(combine_corrections, 'outputnode.out_file', denoise, 'in_file')
    # wf.connect(b0_mask, 'mask_file', denoise, 'in_mask')
    # wf.connect(denoise, 'out_file', ds, 'denoised')

    # check if ok fixme
    denoise = nlmeans_pipeline()
    wf.connect(combine_corrections, 'outputnode.out_file', denoise, 'inputnode.in_file')
    wf.connect(b0_mask, 'mask_file', denoise, 'inputnode.in_mask')
    wf.connect(denoise, 'outputnode.out_file', ds, 'denoised')


    # DTIFIT
    dtifit = Node(interface=fsl.DTIFit(), name='dtifit')
    wf.connect(combine_corrections, 'outputnode.out_file', dtifit, 'dwi')
    wf.connect(b0_mask, 'mask_file', dtifit, 'mask')
    wf.connect(hmc, 'outputnode.out_bvec', dtifit, 'bvecs')
    wf.connect(selectfiles, 'bval_file', dtifit, 'bvals')

    wf.connect(dtifit, 'FA', ds, 'dtifit.@FA')
    wf.connect(dtifit, 'L1', ds, 'dtifit.@L1')
    wf.connect(dtifit, 'L2', ds, 'dtifit.@L2')
    wf.connect(dtifit, 'L3', ds, 'dtifit.@L3')
    wf.connect(dtifit, 'MD', ds, 'dtifit.@MD')
    wf.connect(dtifit, 'MO', ds, 'dtifit.@MO')
    wf.connect(dtifit, 'S0', ds, 'dtifit.@S0')
    wf.connect(dtifit, 'V1', ds, 'dtifit.@V1')
    wf.connect(dtifit, 'V2', ds, 'dtifit.@V2')
    wf.connect(dtifit, 'V3', ds, 'dtifit.@V3')
    wf.connect(dtifit, 'tensor', ds, 'dtifit.@tensor')

    RD_sum = Node(fsl.ImageMaths(op_string='-add '), name='RD_sum')
    wf.connect(dtifit, 'L2', RD_sum, 'in_file')
    wf.connect(dtifit, 'L3', RD_sum, 'in_file2')

    RD = Node(fsl.ImageMaths(op_string='-div 2', out_file='RD.nii.gz'), name='RD')
    wf.connect(RD_sum, 'out_file', RD, 'in_file')
    wf.connect(RD, 'out_file', ds, 'dtifit.@RD')

    simple_ecc = Node(fsl.EddyCorrect(), name='simple_ecc')
    wf.connect(selectfiles, 'dMRI_data', simple_ecc, 'in_file')
    wf.connect(simple_ecc, 'eddy_corrected', ds, 'simple_ecc')


    # DTIFIT DENOISED
    dtifit_denoised = Node(interface=fsl.DTIFit(), name='dtifit_denoised')
    wf.connect(denoise, 'outputnode.out_file', dtifit_denoised, 'dwi')
    wf.connect(b0_mask, 'mask_file', dtifit_denoised, 'mask')
    wf.connect(hmc, 'outputnode.out_bvec', dtifit_denoised, 'bvecs')
    wf.connect(selectfiles, 'bval_file', dtifit_denoised, 'bvals')

    wf.connect(dtifit_denoised, 'FA', ds, 'dtifit_denoised.@FA')
    wf.connect(dtifit_denoised, 'L1', ds, 'dtifit_denoised.@L1')
    wf.connect(dtifit_denoised, 'L2', ds, 'dtifit_denoised.@L2')
    wf.connect(dtifit_denoised, 'L3', ds, 'dtifit_denoised.@L3')
    wf.connect(dtifit_denoised, 'MD', ds, 'dtifit_denoised.@MD')
    wf.connect(dtifit_denoised, 'MO', ds, 'dtifit_denoised.@MO')
    wf.connect(dtifit_denoised, 'S0', ds, 'dtifit_denoised.@S0')
    wf.connect(dtifit_denoised, 'V1', ds, 'dtifit_denoised.@V1')
    wf.connect(dtifit_denoised, 'V2', ds, 'dtifit_denoised.@V2')
    wf.connect(dtifit_denoised, 'V3', ds, 'dtifit_denoised.@V3')



    #
    def _file_to_list(in_file):
        if type(in_file) is not list:
            return [in_file]
        else:
            return in_file

    in_file_to_list = Node(util.Function(input_names=['in_file'], output_names=['out_file'], function=_file_to_list), name='in_file_to_list')
    wf.connect(dtifit, 'FA', in_file_to_list, 'in_file')

    # TBSS
    tbss = create_tbss_all(estimate_skeleton=False)
    tbss.inputs.inputnode.skeleton_thresh = 0.2
    wf.connect(in_file_to_list, 'out_file', tbss, 'inputnode.fa_list')

    wf.connect(tbss, 'outputall_node.mergefa_file3', ds, 'tbss.@mergefa')
    wf.connect(tbss, 'outputnode.projectedfa_file', ds, 'tbss.@projectedfa_file')
    wf.connect(tbss, 'outputnode.skeleton_file4', ds, 'tbss.@skeleton_file')
    wf.connect(tbss, 'outputnode.skeleton_mask', ds, 'tbss.@skeleton_mask')
    # outputnode.meanfa_file
    # outputnode.projectedfa_file
    # outputnode.skeleton_file
    # outputnode.skeleton_mask

    #####################################
    # RUN WF
    #####################################
    wf.write_graph(dotfilename=wf.name, graph2use='colored', format='pdf')  # 'hierarchical')
    wf.write_graph(dotfilename=wf.name, graph2use='orig', format='pdf')
    wf.write_graph(dotfilename=wf.name, graph2use='flat', format='pdf')

    if plugin_name == 'CondorDAGMan':
        wf.run(plugin=plugin_name)
    if plugin_name == 'MultiProc':
        wf.run(plugin=plugin_name, plugin_args={'n_procs': use_n_procs})
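A call sketch with placeholder arguments; the SelectFiles templates must provide the dMRI_data, bval_file and bvec_file keys used above:

run_tbss_wf(subject_id='A00001',             # hypothetical subject
            working_dir='/scr/working_dir',  # hypothetical paths
            ds_dir='/scr/results',
            use_n_procs=4,
            plugin_name='MultiProc',
            dMRI_templates={'dMRI_data': '{subject_id}/dwi.nii.gz',
                            'bval_file': '{subject_id}/dwi.bval',
                            'bvec_file': '{subject_id}/dwi.bvec'},
            in_path='/scr/data')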
Exemplo n.º 10
0
def learning_predict_data_2samp_wf(working_dir,
                                   ds_dir,
                                   in_data_name_list,
                                   subjects_selection_crit_dict,
                                   subjects_selection_crit_names_list,
                                   aggregated_subjects_dir,
                                   target_list,
                                   use_n_procs,
                                   plugin_name,
                                   confound_regression=[False, True],
                                   run_cv=False,
                                   n_jobs_cv=1,
                                   run_tuning=False,
                                   run_2sample_training=False,
                                   aggregated_subjects_dir_nki=None,
                                   subjects_selection_crit_dict_nki=None,
                                   subjects_selection_crit_name_nki=None,
                                   reverse_split=False,
                                   random_state_nki=666,
                                   run_learning_curve=False,
                                   life_test_size=0.5):
    import os
    from nipype import config
    from nipype.pipeline.engine import Node, Workflow
    import nipype.interfaces.utility as util
    import nipype.interfaces.io as nio
    from itertools import chain
    from learning_utils import aggregate_multimodal_metrics_fct, run_prediction_split_fct, \
        backproject_and_split_weights_fct, select_subjects_fct, select_multimodal_X_fct, learning_curve_plot
    import pandas as pd



    ###############################################################################################################
    # GENERAL SETTINGS

    wf = Workflow(name='learning_predict_data_2samp_wf')
    wf.base_dir = os.path.join(working_dir)

    nipype_cfg = dict(logging=dict(workflow_level='DEBUG'),
                      execution={'stop_on_first_crash': False,
                                 'remove_unnecessary_outputs': False,
                                 'job_finished_timeout': 120})
    config.update_config(nipype_cfg)
    wf.config['execution']['crashdump_dir'] = os.path.join(working_dir, 'crash')

    ds = Node(nio.DataSink(), name='ds')
    ds.inputs.base_directory = os.path.join(ds_dir, 'group_learning_prepare_data')

    ds.inputs.regexp_substitutions = [
        # ('subject_id_', ''),
        ('_parcellation_', ''),
        ('_bp_freqs_', 'bp_'),
        ('_extraction_method_', ''),
        ('_subject_id_[A0-9]*/', '')
    ]
    ds_pdf = Node(nio.DataSink(), name='ds_pdf')
    ds_pdf.inputs.base_directory = os.path.join(ds_dir, 'pdfs')
    ds_pdf.inputs.parameterization = False



    ###############################################################################################################
    # ensure in_data_name_list is list of lists
    in_data_name_list = [i if type(i) == list else [i] for i in in_data_name_list]
    in_data_name_list_unique = list(set(chain.from_iterable(in_data_name_list)))



    ###############################################################################################################
    # SET ITERATORS

    in_data_name_infosource = Node(util.IdentityInterface(fields=['in_data_name']), name='in_data_name_infosource')
    in_data_name_infosource.iterables = ('in_data_name', in_data_name_list_unique)

    multimodal_in_data_name_infosource = Node(util.IdentityInterface(fields=['multimodal_in_data_name']),
                                              name='multimodal_in_data_name_infosource')
    multimodal_in_data_name_infosource.iterables = ('multimodal_in_data_name', in_data_name_list)

    subject_selection_infosource = Node(util.IdentityInterface(fields=['selection_criterium']),
                                        name='subject_selection_infosource')
    subject_selection_infosource.iterables = ('selection_criterium', subjects_selection_crit_names_list)

    target_infosource = Node(util.IdentityInterface(fields=['target_name']), name='target_infosource')
    target_infosource.iterables = ('target_name', target_list)



    ###############################################################################################################
    # COMPILE LIFE DATA
    ###############################################################################################################

    ###############################################################################################################
    # GET INFO AND SELECT FILES
    df_all_subjects_pickle_file = os.path.join(aggregated_subjects_dir, 'df_all_subjects_pickle_file/df_all.pkl')
    df = pd.read_pickle(df_all_subjects_pickle_file)

    # build lookup dict for unimodal data
    X_file_template = 'X_file/_in_data_name_{in_data_name}/vectorized_aggregated_data.npy'
    info_file_template = 'unimodal_backprojection_info_file/_in_data_name_{in_data_name}/unimodal_backprojection_info.pkl'
    unimodal_lookup_dict = {}
    for k in in_data_name_list_unique:
        unimodal_lookup_dict[k] = {'X_file': os.path.join(aggregated_subjects_dir, X_file_template.format(
            in_data_name=k)),
                                   'unimodal_backprojection_info_file': os.path.join(aggregated_subjects_dir,
                                                                                     info_file_template.format(
                                                                                         in_data_name=k))
                                   }



    ###############################################################################################################
    # AGGREGATE MULTIMODAL METRICS
    # stack single modality arrays horizontally
    aggregate_multimodal_metrics = Node(util.Function(input_names=['multimodal_list', 'unimodal_lookup_dict'],
                                                      output_names=['X_multimodal_file',
                                                                    'multimodal_backprojection_info',
                                                                    'multimodal_name'],
                                                      function=aggregate_multimodal_metrics_fct),
                                        name='aggregate_multimodal_metrics')
    wf.connect(multimodal_in_data_name_infosource, 'multimodal_in_data_name', aggregate_multimodal_metrics,
               'multimodal_list')
    aggregate_multimodal_metrics.inputs.unimodal_lookup_dict = unimodal_lookup_dict



    ###############################################################################################################
    # GET INDEXER FOR SUBJECTS OF INTEREST (as defined by selection criterium)
    select_subjects = Node(util.Function(input_names=['df_all_subjects_pickle_file',
                                                      'subjects_selection_crit_dict',
                                                      'selection_criterium'],
                                         output_names=['df_use_file',
                                                       'df_use_pickle_file',
                                                       'subjects_selection_index'],
                                         function=select_subjects_fct),
                           name='select_subjects')

    select_subjects.inputs.df_all_subjects_pickle_file = df_all_subjects_pickle_file
    select_subjects.inputs.subjects_selection_crit_dict = subjects_selection_crit_dict
    wf.connect(subject_selection_infosource, 'selection_criterium', select_subjects, 'selection_criterium')



    ###############################################################################################################
    # SELECT MULITMODAL X
    # select subjects (rows) from multimodal X according indexer
    select_multimodal_X = Node(util.Function(input_names=['X_multimodal_file', 'subjects_selection_index',
                                                          'selection_criterium'],
                                             output_names=['X_multimodal_selected_file'],
                                             function=select_multimodal_X_fct),
                               name='select_multimodal_X')
    wf.connect(aggregate_multimodal_metrics, 'X_multimodal_file', select_multimodal_X, 'X_multimodal_file')
    wf.connect(select_subjects, 'subjects_selection_index', select_multimodal_X, 'subjects_selection_index')






    ###############################################################################################################
    # COMPILE NKI DATA
    ###############################################################################################################
    if run_2sample_training:

        ###############################################################################################################
        # GET INFO AND SELECT FILES
        df_all_subjects_pickle_file_nki = os.path.join(aggregated_subjects_dir_nki,
                                                       'df_all_subjects_pickle_file/df_all.pkl')
        df_nki = pd.read_pickle(df_all_subjects_pickle_file_nki)

        # build lookup dict for unimodal data
        X_file_template = 'X_file/_in_data_name_{in_data_name}/vectorized_aggregated_data.npy'
        info_file_template = 'unimodal_backprojection_info_file/_in_data_name_{in_data_name}/unimodal_backprojection_info.pkl'
        unimodal_lookup_dict_nki = {}
        for k in in_data_name_list_unique:
            unimodal_lookup_dict_nki[k] = {'X_file': os.path.join(aggregated_subjects_dir_nki, X_file_template.format(
                in_data_name=k)),
                                           'unimodal_backprojection_info_file': os.path.join(
                                               aggregated_subjects_dir_nki,
                                               info_file_template.format(
                                                   in_data_name=k))
                                           }



        ###############################################################################################################
        # AGGREGATE MULTIMODAL METRICS
        # stack single modality arrays horizontally
        aggregate_multimodal_metrics_nki = Node(util.Function(input_names=['multimodal_list', 'unimodal_lookup_dict'],
                                                              output_names=['X_multimodal_file',
                                                                            'multimodal_backprojection_info',
                                                                            'multimodal_name'],
                                                              function=aggregate_multimodal_metrics_fct),
                                                name='aggregate_multimodal_metrics_nki')
        wf.connect(multimodal_in_data_name_infosource, 'multimodal_in_data_name', aggregate_multimodal_metrics_nki,
                   'multimodal_list')
        aggregate_multimodal_metrics_nki.inputs.unimodal_lookup_dict = unimodal_lookup_dict_nki
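        # A hypothetical sketch of the aggregation step: load each unimodal
        # feature array and stack them column-wise into one subjects-by-features
        # matrix (the real aggregate_multimodal_metrics_fct also collects the
        # per-modality backprojection bookkeeping).
        def _aggregate_multimodal_sketch(multimodal_list, unimodal_lookup_dict):
            import os
            import numpy as np
            X = np.hstack([np.load(unimodal_lookup_dict[name]['X_file'])
                           for name in multimodal_list])
            X_multimodal_file = os.path.abspath('X_multimodal.npy')
            np.save(X_multimodal_file, X)
            return X_multimodal_file, '_'.join(multimodal_list)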



        ###############################################################################################################
        # GET INDEXER FOR SUBJECTS OF INTEREST (as defined by selection criterium)
        select_subjects_nki = Node(util.Function(input_names=['df_all_subjects_pickle_file',
                                                              'subjects_selection_crit_dict',
                                                              'selection_criterium'],
                                                 output_names=['df_use_file',
                                                               'df_use_pickle_file',
                                                               'subjects_selection_index'],
                                                 function=select_subjects_fct),
                                   name='select_subjects_nki')

        select_subjects_nki.inputs.df_all_subjects_pickle_file = df_all_subjects_pickle_file_nki
        select_subjects_nki.inputs.subjects_selection_crit_dict = subjects_selection_crit_dict_nki
        select_subjects_nki.inputs.selection_criterium = subjects_selection_crit_name_nki



        ###############################################################################################################
        # SELECT MULTIMODAL X
        # select subjects (rows) from multimodal X according to the indexer
        select_multimodal_X_nki = Node(util.Function(input_names=['X_multimodal_file', 'subjects_selection_index',
                                                                  'selection_criterium'],
                                                     output_names=['X_multimodal_selected_file'],
                                                     function=select_multimodal_X_fct),
                                       name='select_multimodal_X_nki')
        wf.connect(aggregate_multimodal_metrics_nki, 'X_multimodal_file', select_multimodal_X_nki, 'X_multimodal_file')
        wf.connect(select_subjects_nki, 'subjects_selection_index', select_multimodal_X_nki, 'subjects_selection_index')





    ###############################################################################################################
    # RUN PREDICTION
    #
    prediction_node_dict = {}
    backprojection_node_dict = {}

    prediction_split = Node(util.Function(input_names=['X_file',
                                                       'target_name',
                                                       'selection_criterium',
                                                       'df_file',
                                                       'data_str',
                                                       'regress_confounds',
                                                       'run_cv',
                                                       'n_jobs_cv',
                                                       'run_tuning',
                                                       'X_file_nki',
                                                       'df_file_nki',
                                                       'reverse_split',
                                                       'random_state_nki',
                                                       'run_learning_curve',
                                                       'life_test_size'],
                                          output_names=['scatter_file',
                                                        'brain_age_scatter_file',
                                                        'df_life_out_file',
                                                        'df_nki_out_file',
                                                        'df_big_out_file',
                                                        'model_out_file',
                                                        'df_res_out_file',
                                                        'tuning_curve_file',
                                                        'scatter_file_cv',
                                                        'learning_curve_plot_file',
                                                        'learning_curve_df_file'],
                                          function=run_prediction_split_fct),
                            name='prediction_split')

    backproject_and_split_weights = Node(util.Function(input_names=['trained_model_file',
                                                                    'multimodal_backprojection_info',
                                                                    'data_str',
                                                                    'target_name'],
                                                       output_names=['out_file_list',
                                                                     'out_file_render_list'],
                                                       function=backproject_and_split_weights_fct),
                                         name='backproject_and_split_weights')

    i = 0
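    # One clone of the prediction and backprojection nodes is created per entry
    # in confound_regression ([False, True] by default), so every model is fit
    # both with and without confound regression and datasinked under its own
    # 'single_source_model_reg_*' prefix.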

    for reg in confound_regression:
        the_out_node_str = 'single_source_model_reg_%s_' % (reg)
        prediction_node_dict[i] = prediction_split.clone(the_out_node_str)
        the_in_node = prediction_node_dict[i]
        the_in_node.inputs.regress_confounds = reg
        the_in_node.inputs.run_cv = run_cv
        the_in_node.inputs.n_jobs_cv = n_jobs_cv
        the_in_node.inputs.run_tuning = run_tuning
        the_in_node.inputs.reverse_split = reverse_split
        the_in_node.inputs.random_state_nki = random_state_nki
        the_in_node.inputs.run_learning_curve = run_learning_curve
        the_in_node.inputs.life_test_size = life_test_size

        wf.connect(select_multimodal_X, 'X_multimodal_selected_file', the_in_node, 'X_file')
        wf.connect(target_infosource, 'target_name', the_in_node, 'target_name')
        wf.connect(subject_selection_infosource, 'selection_criterium', the_in_node, 'selection_criterium')
        wf.connect(select_subjects, 'df_use_pickle_file', the_in_node, 'df_file')
        wf.connect(aggregate_multimodal_metrics, 'multimodal_name', the_in_node, 'data_str')

        wf.connect(the_in_node, 'model_out_file', ds, the_out_node_str + 'trained_model')
        wf.connect(the_in_node, 'scatter_file', ds_pdf, the_out_node_str + 'scatter')
        wf.connect(the_in_node, 'brain_age_scatter_file', ds_pdf, the_out_node_str + 'brain_age_scatter')
        wf.connect(the_in_node, 'df_life_out_file', ds_pdf, the_out_node_str + 'predicted_life')
        wf.connect(the_in_node, 'df_nki_out_file', ds_pdf, the_out_node_str + 'predicted_nki')
        wf.connect(the_in_node, 'df_big_out_file', ds_pdf, the_out_node_str + 'predicted')

        wf.connect(the_in_node, 'df_res_out_file', ds_pdf, the_out_node_str + 'results_error')
        wf.connect(the_in_node, 'tuning_curve_file', ds_pdf, the_out_node_str + 'tuning_curve')
        wf.connect(the_in_node, 'scatter_file_cv', ds_pdf, the_out_node_str + 'scatter_cv')
        wf.connect(the_in_node, 'learning_curve_plot_file', ds_pdf, the_out_node_str + 'learning_curve_plot_file.@plot')
        wf.connect(the_in_node, 'learning_curve_df_file', ds_pdf, the_out_node_str + 'learning_curve_df_file.@df')

        # NKI
        if run_2sample_training:
            wf.connect(select_multimodal_X_nki, 'X_multimodal_selected_file', the_in_node, 'X_file_nki')
            wf.connect(select_subjects_nki, 'df_use_pickle_file', the_in_node, 'df_file_nki')

        else:
            the_in_node.inputs.df_file_nki = None
            the_in_node.inputs.X_file_nki = None

        # BACKPROJECT PREDICTION WEIGHTS
        # map weights back to each single modality's original format (e.g., nifti or matrix)
        the_out_node_str = 'backprojection_single_source_model_reg_%s_' % (reg)
        backprojection_node_dict[i] = backproject_and_split_weights.clone(the_out_node_str)
        the_from_node = prediction_node_dict[i]
        the_in_node = backprojection_node_dict[i]
        wf.connect(the_from_node, 'model_out_file', the_in_node, 'trained_model_file')
        wf.connect(aggregate_multimodal_metrics, 'multimodal_backprojection_info', the_in_node,
                   'multimodal_backprojection_info')
        wf.connect(aggregate_multimodal_metrics, 'multimodal_name', the_in_node, 'data_str')
        wf.connect(target_infosource, 'target_name', the_in_node, 'target_name')

        wf.connect(the_in_node, 'out_file_list', ds_pdf, the_out_node_str + '.@weights')
        wf.connect(the_in_node, 'out_file_render_list', ds_pdf, the_out_node_str + 'renders.@renders')

        i += 1



    ###############################################################################################################
    # RUN WF
    wf.write_graph(dotfilename=wf.name, graph2use='colored', format='pdf')  # alternative: graph2use='hierarchical'
    wf.write_graph(dotfilename=wf.name, graph2use='orig', format='pdf')
    wf.write_graph(dotfilename=wf.name, graph2use='flat', format='pdf')

    if plugin_name == 'CondorDAGMan':
        wf.run(plugin=plugin_name)
    if plugin_name == 'MultiProc':
        wf.run(plugin=plugin_name, plugin_args={'n_procs': use_n_procs})
Exemplo n.º 11
0
def learning_predict_data_wf(working_dir,
                             ds_dir,
                             trained_model_dir,
                             in_data_name_list,
                             subjects_selection_crit_dict,
                             subjects_selection_crit_names_list,
                             aggregated_subjects_dir,
                             target_list,
                             trained_model_template,
                             use_n_procs,
                             plugin_name,
                             confound_regression=[False, True]):
    # trained_model_template = {
    # 'trained_model': 'learning_out/group_learning_prepare_data/{ana_stream}trained_model/' +
    #                  '_multimodal_in_data_name_{multimodal_in_data_name}/_selection_criterium_bothSexes_neuH/' +
    #                  '_target_name_{target_name}/trained_model.pkl'}

    import os
    from nipype import config
    from nipype.pipeline.engine import Node, Workflow
    import nipype.interfaces.utility as util
    import nipype.interfaces.io as nio
    from itertools import chain
    from learning_utils import aggregate_multimodal_metrics_fct, run_prediction_from_trained_model_fct, \
        select_subjects_fct, select_multimodal_X_fct
    import pandas as pd

    ###############################################################################################################
    # GENERAL SETTINGS

    wf = Workflow(name='learning_predict_data_from_trained_model_wf')
    wf.base_dir = os.path.join(working_dir)

    nipype_cfg = dict(logging=dict(workflow_level='DEBUG'),
                      execution={'stop_on_first_crash': False,
                                 'remove_unnecessary_outputs': False,
                                 'job_finished_timeout': 120})
    config.update_config(nipype_cfg)
    wf.config['execution']['crashdump_dir'] = os.path.join(working_dir, 'crash')

    ds = Node(nio.DataSink(), name='ds')
    ds.inputs.base_directory = os.path.join(ds_dir, 'group_learning_prepare_data')

    ds.inputs.regexp_substitutions = [
        # ('subject_id_', ''),
        ('_parcellation_', ''),
        ('_bp_freqs_', 'bp_'),
        ('_extraction_method_', ''),
        ('_subject_id_[A0-9]*/', '')
    ]
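    # the regexp substitutions strip iterable-derived folder levels from sink
    # paths, e.g. a hypothetical '.../_subject_id_A00012/...' level is removed
    # and '_bp_freqs_' prefixes are shortened to 'bp_'.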
    ds_pdf = Node(nio.DataSink(), name='ds_pdf')
    ds_pdf.inputs.base_directory = os.path.join(ds_dir, 'pdfs')
    ds_pdf.inputs.parameterization = False



    ###############################################################################################################
    # ensure in_data_name_list is list of lists
    in_data_name_list = [i if type(i) == list else [i] for i in in_data_name_list]
    in_data_name_list_unique = list(set(chain.from_iterable(in_data_name_list)))



    ###############################################################################################################
    # SET ITERATORS

    in_data_name_infosource = Node(util.IdentityInterface(fields=['in_data_name']), name='in_data_name_infosource')
    in_data_name_infosource.iterables = ('in_data_name', in_data_name_list_unique)

    multimodal_in_data_name_infosource = Node(util.IdentityInterface(fields=['multimodal_in_data_name']),
                                              name='multimodal_in_data_name_infosource')
    multimodal_in_data_name_infosource.iterables = ('multimodal_in_data_name', in_data_name_list)

    subject_selection_infosource = Node(util.IdentityInterface(fields=['selection_criterium']),
                                        name='subject_selection_infosource')
    subject_selection_infosource.iterables = ('selection_criterium', subjects_selection_crit_names_list)

    target_infosource = Node(util.IdentityInterface(fields=['target_name']), name='target_infosource')
    target_infosource.iterables = ('target_name', target_list)
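    # each infosource's iterables fan the graph out: downstream nodes run once
    # per combination of multimodal data-name list, selection criterium and
    # target, and the sink folder names encode the branch parameters.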



    ###############################################################################################################
    # GET INFO AND SELECT FILES
    df_all_subjects_pickle_file = os.path.join(aggregated_subjects_dir, 'df_all_subjects_pickle_file/df_all.pkl')
    df = pd.read_pickle(df_all_subjects_pickle_file)

    # build lookup dict for unimodal data
    X_file_template = 'X_file/_in_data_name_{in_data_name}/vectorized_aggregated_data.npy'
    info_file_template = 'unimodal_backprojection_info_file/_in_data_name_{in_data_name}/unimodal_backprojection_info.pkl'
    unimodal_lookup_dict = {}
    for k in in_data_name_list_unique:
        unimodal_lookup_dict[k] = {'X_file': os.path.join(aggregated_subjects_dir, X_file_template.format(
            in_data_name=k)),
                                   'unimodal_backprojection_info_file': os.path.join(aggregated_subjects_dir,
                                                                                     info_file_template.format(
                                                                                         in_data_name=k))
                                   }



    ###############################################################################################################
    # AGGREGATE MULTIMODAL METRICS
    # stack single modality arrays horizontally
    aggregate_multimodal_metrics = Node(util.Function(input_names=['multimodal_list', 'unimodal_lookup_dict'],
                                                      output_names=['X_multimodal_file',
                                                                    'multimodal_backprojection_info',
                                                                    'multimodal_name'],
                                                      function=aggregate_multimodal_metrics_fct),
                                        name='aggregate_multimodal_metrics')
    wf.connect(multimodal_in_data_name_infosource, 'multimodal_in_data_name', aggregate_multimodal_metrics,
               'multimodal_list')
    aggregate_multimodal_metrics.inputs.unimodal_lookup_dict = unimodal_lookup_dict



    ###############################################################################################################
    # GET INDEXER FOR SUBJECTS OF INTEREST (as defined by selection criterium)
    select_subjects = Node(util.Function(input_names=['df_all_subjects_pickle_file',
                                                      'subjects_selection_crit_dict',
                                                      'selection_criterium'],
                                         output_names=['df_use_file',
                                                       'df_use_pickle_file',
                                                       'subjects_selection_index'],
                                         function=select_subjects_fct),
                           name='select_subjects')

    select_subjects.inputs.df_all_subjects_pickle_file = df_all_subjects_pickle_file
    select_subjects.inputs.subjects_selection_crit_dict = subjects_selection_crit_dict
    wf.connect(subject_selection_infosource, 'selection_criterium', select_subjects, 'selection_criterium')



    ###############################################################################################################
    # SELECT MULTIMODAL X
    # select subjects (rows) from multimodal X according to the indexer
    select_multimodal_X = Node(util.Function(input_names=['X_multimodal_file', 'subjects_selection_index',
                                                          'selection_criterium'],
                                             output_names=['X_multimodal_selected_file'],
                                             function=select_multimodal_X_fct),
                               name='select_multimodal_X')
    wf.connect(aggregate_multimodal_metrics, 'X_multimodal_file', select_multimodal_X, 'X_multimodal_file')
    wf.connect(select_subjects, 'subjects_selection_index', select_multimodal_X, 'subjects_selection_index')



    ###############################################################################################################
    # RUN PREDICTION
    #
    prediction_node_dict = {}
    select_trained_model_node_dict = {}

    prediction = Node(util.Function(input_names=['trained_model_file',
                                                 'X_file',
                                                 'target_name',
                                                 'selection_criterium',
                                                 'df_file',
                                                 'data_str',
                                                 'regress_confounds'],
                                    output_names=['scatter_file',
                                                  'brain_age_scatter_file',
                                                  'df_use_file',
                                                  'df_res_out_file'],
                                    function=run_prediction_from_trained_model_fct),
                      name='prediction')

    def rep(s):
        return s.replace('__', '.')
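    # nipype applies rep() to the value flowing through the connection below:
    # ('multimodal_name', rep) hands SelectFiles rep(multimodal_name), so a
    # hypothetical name like 'falff__alff' would arrive as 'falff.alff'.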

    select_trained_model = Node(nio.SelectFiles(trained_model_template), 'select_trained_model')

    i = 0

    for reg in confound_regression:
        the_out_node_str = 'single_source_model_reg_%s_' % reg

        select_trained_model_node_dict[i] = select_trained_model.clone(
            the_out_node_str + 'select_trained_model')
        select_trained_model_node_dict[i].inputs.base_directory = trained_model_dir
        select_trained_model_node_dict[i].inputs.ana_stream = the_out_node_str

        wf.connect(target_infosource, 'target_name', select_trained_model_node_dict[i], 'target_name')
        wf.connect(aggregate_multimodal_metrics, ('multimodal_name', rep),
                   select_trained_model_node_dict[i],
                   'multimodal_in_data_name')

        prediction_node_dict[i] = prediction.clone(the_out_node_str)
        the_in_node = prediction_node_dict[i]
        the_in_node.inputs.regress_confounds = reg

        wf.connect(select_trained_model_node_dict[i], 'trained_model', the_in_node, 'trained_model_file')
        wf.connect(select_multimodal_X, 'X_multimodal_selected_file', the_in_node, 'X_file')
        wf.connect(target_infosource, 'target_name', the_in_node, 'target_name')
        wf.connect(subject_selection_infosource, 'selection_criterium', the_in_node, 'selection_criterium')
        wf.connect(select_subjects, 'df_use_pickle_file', the_in_node, 'df_file')
        wf.connect(aggregate_multimodal_metrics, 'multimodal_name', the_in_node, 'data_str')

        wf.connect(the_in_node, 'scatter_file', ds_pdf, the_out_node_str + 'scatter')
        wf.connect(the_in_node, 'brain_age_scatter_file', ds_pdf, the_out_node_str + 'brain_age_scatter')
        wf.connect(the_in_node, 'df_use_file', ds_pdf, the_out_node_str + 'predicted')
        wf.connect(the_in_node, 'df_res_out_file', ds_pdf, the_out_node_str + 'results_error')

        i += 1



    ###############################################################################################################
    #  RUN WF
    wf.write_graph(dotfilename=wf.name, graph2use='colored', format='pdf')  # alternative: graph2use='hierarchical'
    wf.write_graph(dotfilename=wf.name, graph2use='orig', format='pdf')
    wf.write_graph(dotfilename=wf.name, graph2use='flat', format='pdf')

    if plugin_name == 'CondorDAGMan':
        wf.run(plugin=plugin_name)
    if plugin_name == 'MultiProc':
        wf.run(plugin=plugin_name, plugin_args={'n_procs': use_n_procs})
Exemplo n.º 12
0
    img_out.to_filename(out_file)
    out_files.append(out_file)

    return out_files
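
# A hypothetical sketch of the bpfilter function wrapped below (its head is
# truncated in this excerpt): band-pass each 4D image with an FFT mask between
# the two cut-off frequencies and write the filtered copies out.
def _bpfilter_sketch(in_files, lp, hp, TR):
    import os
    import numpy as np
    import nibabel as nib
    out_files = []
    for in_file in in_files:
        img = nib.load(in_file)
        data = img.get_fdata()
        freqs = np.fft.rfftfreq(data.shape[-1], d=TR)
        lo, hi = sorted((lp, hp))  # tolerate swapped cut-off arguments
        mask = (freqs >= lo) & (freqs <= hi)
        spectrum = np.fft.rfft(data, axis=-1)
        spectrum[..., ~mask] = 0  # zero everything outside the pass band
        filtered = np.fft.irfft(spectrum, n=data.shape[-1], axis=-1)
        img_out = nib.Nifti1Image(filtered, img.affine, img.header)
        out_file = os.path.join(os.getcwd(), 'bp_' + os.path.basename(in_file))
        img_out.to_filename(out_file)
        out_files.append(out_file)
    return out_files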

# Function node to band-pass filter both the smoothed and unsmoothed data
bandpass = Node(Function(input_names=['in_files', 'lp', 'hp', 'TR'],
                         output_names=['out_files'],
                         function=bpfilter, imports=imports), name='bandpass')

# NOTE: for the usual resting-state band (0.01-0.1 Hz) the high-pass cut-off is
# 0.01 Hz and the low-pass cut-off 0.1 Hz; the assignments below follow the
# original source, so verify which cut-off bpfilter treats 'lp' and 'hp' as.
bandpass.inputs.lp = 0.01
bandpass.inputs.hp = 0.1
bandpass.inputs.TR = TR

bandpass_unsmooth = bandpass.clone('bandpass_unsmooth')

#create the preprocessing workflow
preproc = Workflow(name='preproc3')
preproc.base_dir = os.path.join(experiment_dir,working_dir)

preproc.connect([(infosource,selectfiles,[('subject_id','subject_id')]),
                 #(infosource,selectfiles_segment, [('subject_id','subject_id')]),
            (selectfiles,merge,[('func','in_files')]),
            (realign,meanfunc,[('modified_in_files','in_file')]),
            (selectfiles,segment,[('anat','channel_files')]),
            (meanfunc,bet_func,[('out_file','in_file')]),
            (selectfiles,bet_struct,[('anat','in_file')]),
            (bet_func,coregister,[('out_file','target')]),
            (bet_struct,coregister,[('out_file','source')]),
            (coregister,normalize_struct,[('coregistered_source','apply_to_files')]),
Exemplo n.º 13
0
            ('pvc_labels.roi4DMaskFile', 'pvc.mask_file'),
            ('add.out_file', 'ROImeans.labelImgFile'),
            ('add.out_file', 'ROI_Q3.labelImgFile'),
        ])
    ])

# 7. MNI SPACE
# Quick registration to MNI template
mri_to_mni = Node(interface=fsl.FLIRT(dof=12, reference=template),
                  name="mri_to_mni")
mergexfm = Node(interface=fsl.ConvertXFM(concat_xfm=True), name="mergexfm")
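# ConvertXFM(concat_xfm=True) composes two FLIRT matrices; presumably the
# PET-to-MRI and MRI-to-MNI transforms are chained here so the PET and SUVR
# volumes can be resampled to the template in one ApplyXFM step (the actual
# wiring is not shown in this excerpt).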

transform_pet = Node(interface=fsl.ApplyXFM(apply_xfm=True,
                                            reference=template),
                     name='transform_pet')
transform_suvr = transform_pet.clone(name='transform_suvr')

smooth_suvr = Node(interface=fsl.Smooth(fwhm=args.smooth_fwhm),
                   name="smooth_suvr")

mask_suvr = Node(interface=fsl.ImageMaths(op_string=' -mul ',
                                          suffix='_mul',
                                          in_file2=template_brainmask),
                 name='mask_suvr')

suvr_qc = Node(interface=triplanar_snapshots(alpha=.5,
                                             x=81,
                                             y=93,
                                             z=77,
                                             vmin=0.0,
                                             vmax=2.5),
Exemplo n.º 14
0
def make_func_subcortical_masks(name='func_subcortical'):

    # Define Workflow
    flow = Workflow(name=name)
    inputnode = Node(util.IdentityInterface(fields=['func_first']),
                     name='inputnode')
    outputnode = Node(util.IdentityInterface(fields=[
        'left_nacc', 'left_amygdala', 'left_caudate', 'left_hipoocampus',
        'left_pallidum', 'left_putamen', 'left_thalamus', 'right_nacc',
        'right_amygdala', 'right_caudate', 'right_hipoocampus',
        'right_pallidum', 'right_putamen', 'right_thalamus', 'midbrain',
        'right_striatum', 'left_striatum'
    ]),
                      name='outputnode')
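    # func_first is assumed to be FIRST's 4D segmentation output resampled to
    # functional space, with one volume per structure in the order indexed by
    # t_min below (0 = left nucleus accumbens ... 14 = midbrain).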

    left_nacc = Node(interface=fsl.ExtractROI(), name='left_nacc')
    left_nacc.inputs.t_min = 0
    left_nacc.inputs.t_size = 1
    left_nacc.inputs.roi_file = 'left_nacc.nii.gz'

    left_amygdala = Node(interface=fsl.ExtractROI(), name='left_amygdala')
    left_amygdala.inputs.t_min = 1
    left_amygdala.inputs.t_size = 1
    left_amygdala.inputs.roi_file = 'left_amygdala.nii.gz'

    left_caudate = Node(interface=fsl.ExtractROI(), name='left_caudate')
    left_caudate.inputs.t_min = 2
    left_caudate.inputs.t_size = 1
    left_caudate.inputs.roi_file = 'left_caudate.nii.gz'

    left_hipoocampus = Node(interface=fsl.ExtractROI(),
                            name='left_hipoocampus')
    left_hipoocampus.inputs.t_min = 3
    left_hipoocampus.inputs.t_size = 1
    left_hipoocampus.inputs.roi_file = 'left_hipoocampus.nii.gz'

    left_pallidum = Node(interface=fsl.ExtractROI(), name='left_pallidum')
    left_pallidum.inputs.t_min = 4
    left_pallidum.inputs.t_size = 1
    left_pallidum.inputs.roi_file = 'left_pallidum.nii.gz'

    left_putamen = Node(interface=fsl.ExtractROI(), name='left_putamen')
    left_putamen.inputs.t_min = 5
    left_putamen.inputs.t_size = 1
    left_putamen.inputs.roi_file = 'left_putamen.nii.gz'

    left_thalamus = Node(interface=fsl.ExtractROI(), name='left_thalamus')
    left_thalamus.inputs.t_min = 6
    left_thalamus.inputs.t_size = 1
    left_thalamus.inputs.roi_file = 'left_thalamus.nii.gz'

    ###############

    right_nacc = Node(interface=fsl.ExtractROI(), name='right_nacc')
    right_nacc.inputs.t_min = 7
    right_nacc.inputs.t_size = 1
    right_nacc.inputs.roi_file = 'right_nacc.nii.gz'

    right_amygdala = Node(interface=fsl.ExtractROI(), name='right_amygdala')
    right_amygdala.inputs.t_min = 8
    right_amygdala.inputs.t_size = 1
    right_amygdala.inputs.roi_file = 'right_amygdala.nii.gz'

    right_caudate = Node(interface=fsl.ExtractROI(), name='right_caudate')
    right_caudate.inputs.t_min = 9
    right_caudate.inputs.t_size = 1
    right_caudate.inputs.roi_file = 'right_caudate.nii.gz'

    right_hipoocampus = Node(interface=fsl.ExtractROI(),
                             name='right_hipoocampus')
    right_hipoocampus.inputs.t_min = 10
    right_hipoocampus.inputs.t_size = 1
    right_hipoocampus.inputs.roi_file = 'right_hipoocampus.nii.gz'

    right_pallidum = Node(interface=fsl.ExtractROI(), name='right_pallidum')
    right_pallidum.inputs.t_min = 11
    right_pallidum.inputs.t_size = 1
    right_pallidum.inputs.roi_file = 'right_pallidum.nii.gz'

    right_putamen = Node(interface=fsl.ExtractROI(), name='right_putamen')
    right_putamen.inputs.t_min = 12
    right_putamen.inputs.t_size = 1
    right_putamen.inputs.roi_file = 'right_putamen.nii.gz'

    right_thalamus = Node(interface=fsl.ExtractROI(), name='right_thalamus')
    right_thalamus.inputs.t_min = 13
    right_thalamus.inputs.t_size = 1
    right_thalamus.inputs.roi_file = 'right_thalamus.nii.gz'

    midbrain = Node(interface=fsl.ExtractROI(), name='midbrain')
    midbrain.inputs.t_min = 14
    midbrain.inputs.t_size = 1
    midbrain.inputs.roi_file = 'midbrain.nii.gz'

    flow.connect(inputnode, 'func_first', left_nacc, 'in_file')
    flow.connect(inputnode, 'func_first', left_amygdala, 'in_file')
    flow.connect(inputnode, 'func_first', left_caudate, 'in_file')
    flow.connect(inputnode, 'func_first', left_hipoocampus, 'in_file')
    flow.connect(inputnode, 'func_first', left_pallidum, 'in_file')
    flow.connect(inputnode, 'func_first', left_putamen, 'in_file')
    flow.connect(inputnode, 'func_first', left_thalamus, 'in_file')

    flow.connect(inputnode, 'func_first', right_nacc, 'in_file')
    flow.connect(inputnode, 'func_first', right_amygdala, 'in_file')
    flow.connect(inputnode, 'func_first', right_caudate, 'in_file')
    flow.connect(inputnode, 'func_first', right_hipoocampus, 'in_file')
    flow.connect(inputnode, 'func_first', right_pallidum, 'in_file')
    flow.connect(inputnode, 'func_first', right_putamen, 'in_file')
    flow.connect(inputnode, 'func_first', right_thalamus, 'in_file')
    flow.connect(inputnode, 'func_first', midbrain, 'in_file')

    flow.connect(left_nacc, 'roi_file', outputnode, 'left_nacc')
    flow.connect(left_amygdala, 'roi_file', outputnode, 'left_amygdala')
    flow.connect(left_caudate, 'roi_file', outputnode, 'left_caudate')
    flow.connect(left_hipoocampus, 'roi_file', outputnode, 'left_hipoocampus')
    flow.connect(left_pallidum, 'roi_file', outputnode, 'left_pallidum')
    flow.connect(left_putamen, 'roi_file', outputnode, 'left_putamen')
    flow.connect(left_thalamus, 'roi_file', outputnode, 'left_thalamus')
    flow.connect(right_nacc, 'roi_file', outputnode, 'right_nacc')
    flow.connect(right_amygdala, 'roi_file', outputnode, 'right_amygdala')
    flow.connect(right_caudate, 'roi_file', outputnode, 'right_caudate')
    flow.connect(right_hipoocampus, 'roi_file', outputnode,
                 'right_hipoocampus')
    flow.connect(right_pallidum, 'roi_file', outputnode, 'right_pallidum')
    flow.connect(right_putamen, 'roi_file', outputnode, 'right_putamen')
    flow.connect(right_thalamus, 'roi_file', outputnode, 'right_thalamus')
    flow.connect(midbrain, 'roi_file', outputnode, 'midbrain')

    # add images together
    right_striatum = Node(interface=fsl.MultiImageMaths(),
                          name='right_striatum')
    right_striatum.inputs.op_string = '-add %s -add %s -bin'
    right_striatum.inputs.out_file = 'right_striatum.nii.gz'
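    # return_list is not shown in this excerpt; a minimal (assumed)
    # implementation just packs the two ROI files into the operand_files list
    # that MultiImageMaths expects:
    def return_list(file_1, file_2):
        return [file_1, file_2]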
    list_R_str = Node(util.Function(input_names=['file_1', 'file_2'],
                                    output_names=['list'],
                                    function=return_list),
                      name='list_str_r')

    flow.connect(right_pallidum, 'roi_file', list_R_str, 'file_1')
    flow.connect(right_putamen, 'roi_file', list_R_str, 'file_2')
    flow.connect(right_caudate, 'roi_file', right_striatum, 'in_file')
    flow.connect(list_R_str, 'list', right_striatum, 'operand_files')
    flow.connect(right_striatum, 'out_file', outputnode, 'right_striatum')

    left_striatum = Node(interface=fsl.MultiImageMaths(), name='left_striatum')
    left_striatum.inputs.op_string = '-add %s -add %s -bin'  # binarize, matching the right striatum mask
    left_striatum.inputs.out_file = 'left_striatum.nii.gz'
    list_L_str = list_R_str.clone('list_str_l')

    flow.connect(left_pallidum, 'roi_file', list_L_str, 'file_1')
    flow.connect(left_putamen, 'roi_file', list_L_str, 'file_2')
    flow.connect(left_caudate, 'roi_file', left_striatum, 'in_file')
    flow.connect(list_L_str, 'list', left_striatum, 'operand_files')
    flow.connect(left_striatum, 'out_file', outputnode, 'left_striatum')

    return flow
Exemplo n.º 15
0
# robust min max of fields
def makelist(file1, file2):
    filelist = [file1, file2]
    return filelist

make_list=Node(util.Function(input_names=['file1', 'file2'],
                             output_names=['filelist'],
                             function=makelist),
               name='make_list')

min_max = MapNode(fsl.ImageStats(op_string='-r'),
                  iterfield=['in_file'],
                  name='min_max')

min_max_txt = corr_fields_txt.clone(name='min_max_txt')
min_max_txt.inputs.filename = 'min_max_fields.txt'

simulated.connect([(simulation, make_list, [('outputnode.nonlin_field_masked', 'file1'),
                                            ('outputnode.fmap_field_masked', 'file2')]),
                   (make_list, min_max, [('filelist', 'in_file')]),
                   (min_max, min_max_txt, [('out_stat', 'stats')])])



# correlation of different corrections to groundtruth
def makelist2(file1, file2, file3):
    filelist = [file1, file2, file3]
    return filelist

make_list2=Node(util.Function(input_names=['file1', 'file2', 'file3'],
Exemplo n.º 16
0

#========================================================================================================
# In[14]:

# apply the transformation to all the diffusion metric maps

fa_2_atlas = Node(ants.ApplyTransforms(), name = 'fa_2_atlas')
fa_2_atlas.inputs.dimension = 3

fa_2_atlas.inputs.input_image_type = 3
fa_2_atlas.inputs.num_threads = 1
fa_2_atlas.inputs.float = True
fa_2_atlas.inputs.reference_image = '/home/in/aeed/fsl/fsl/data/atlases/JHU/JHU-ICBM-FA-2mm.nii.gz'
#========================================================================================================
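# Each clone below inherits the inputs already configured on fa_2_atlas
# (dimension, input_image_type, num_threads, reference_image); only the node
# name changes, so the same atlas transform is applied to every metric map.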
md_2_atlas = fa_2_atlas.clone(name = 'md_2_atlas')
rd_2_atlas = fa_2_atlas.clone(name = 'rd_2_atlas')
ad_2_atlas = fa_2_atlas.clone(name = 'ad_2_atlas')


ball_stick_ICVF_2_atlas = fa_2_atlas.clone(name = 'ball_stick_ICVF_2_atlas')
ball_stick_ECVF_2_atlas = fa_2_atlas.clone(name = 'ball_stick_ECVF_2_atlas')

noddi_mu_2_atlas = fa_2_atlas.clone(name = 'noddi_mu_2_atlas')
noddi_odi_2_atlas = fa_2_atlas.clone(name = 'noddi_odi_2_atlas')
noddi_ICVF_2_atlas = fa_2_atlas.clone(name = 'noddi_ICVF_2_atlas')
noddi_ECVF_2_atlas = fa_2_atlas.clone(name = 'noddi_ECVF_2_atlas')
noddi_ISOVF_2_atlas = fa_2_atlas.clone(name = 'noddi_ISOVF_2_atlas')


mcmdi_lambda_2_atlas = fa_2_atlas.clone(name = 'mcmdi_lambda_2_atlas')
Exemplo n.º 17
0
def make_func_subcortical_masks(name = 'func_subcortical'):


    # Define Workflow
    flow        = Workflow(name=name)
    inputnode   = Node(util.IdentityInterface(fields=['func_first']),
                           name='inputnode')
    outputnode  = Node(util.IdentityInterface(fields=['left_nacc', 'left_amygdala',  'left_caudate',  'left_hipoocampus',  'left_pallidum',  'left_putamen', 'left_thalamus',
                                                      'right_nacc','right_amygdala', 'right_caudate', 'right_hipoocampus', 'right_pallidum', 'right_putamen','right_thalamus',
                                                      'midbrain', 'right_striatum', 'left_striatum']),
                           name = 'outputnode')

    left_nacc = Node(interface=fsl.ExtractROI(), name = 'left_nacc')
    left_nacc.inputs.t_min  = 0
    left_nacc.inputs.t_size = 1
    left_nacc.inputs.roi_file = 'left_nacc.nii.gz'

    left_amygdala = Node(interface=fsl.ExtractROI(), name = 'left_amygdala')
    left_amygdala.inputs.t_min  = 1
    left_amygdala.inputs.t_size = 1
    left_amygdala.inputs.roi_file = 'left_amygdala.nii.gz'

    left_caudate = Node(interface=fsl.ExtractROI(), name = 'left_caudate')
    left_caudate.inputs.t_min  = 2
    left_caudate.inputs.t_size = 1
    left_caudate.inputs.roi_file = 'left_caudate.nii.gz'

    left_hipoocampus = Node(interface=fsl.ExtractROI(), name = 'left_hipoocampus')
    left_hipoocampus.inputs.t_min  = 3
    left_hipoocampus.inputs.t_size = 1
    left_hipoocampus.inputs.roi_file = 'left_hipoocampus.nii.gz'

    left_pallidum = Node(interface=fsl.ExtractROI(), name = 'left_pallidum')
    left_pallidum.inputs.t_min  = 4
    left_pallidum.inputs.t_size = 1
    left_pallidum.inputs.roi_file = 'left_pallidum.nii.gz'

    left_putamen = Node(interface=fsl.ExtractROI(), name = 'left_putamen')
    left_putamen.inputs.t_min  = 5
    left_putamen.inputs.t_size = 1
    left_putamen.inputs.roi_file = 'left_putamen.nii.gz'

    left_thalamus = Node(interface=fsl.ExtractROI(), name = 'left_thalamus')
    left_thalamus.inputs.t_min  = 6
    left_thalamus.inputs.t_size = 1
    left_thalamus.inputs.roi_file = 'left_thalamus.nii.gz'

    ###############

    right_nacc = Node(interface=fsl.ExtractROI(), name = 'right_nacc')
    right_nacc.inputs.t_min  = 7
    right_nacc.inputs.t_size = 1
    right_nacc.inputs.roi_file = 'right_nacc.nii.gz'

    right_amygdala = Node(interface=fsl.ExtractROI(), name = 'right_amygdala')
    right_amygdala.inputs.t_min  = 8
    right_amygdala.inputs.t_size = 1
    right_amygdala.inputs.roi_file = 'right_amygdala.nii.gz'

    right_caudate = Node(interface=fsl.ExtractROI(), name = 'right_caudate')
    right_caudate.inputs.t_min  = 9
    right_caudate.inputs.t_size = 1
    right_caudate.inputs.roi_file = 'right_caudate.nii.gz'

    right_hipoocampus = Node(interface=fsl.ExtractROI(), name = 'right_hipoocampus')
    right_hipoocampus.inputs.t_min  = 10
    right_hipoocampus.inputs.t_size = 1
    right_hipoocampus.inputs.roi_file = 'right_hipoocampus.nii.gz'

    right_pallidum = Node(interface=fsl.ExtractROI(), name = 'right_pallidum')
    right_pallidum.inputs.t_min  = 11
    right_pallidum.inputs.t_size = 1
    right_pallidum.inputs.roi_file = 'right_pallidum.nii.gz'

    right_putamen = Node(interface=fsl.ExtractROI(), name = 'right_putamen')
    right_putamen.inputs.t_min  = 12
    right_putamen.inputs.t_size = 1
    right_putamen.inputs.roi_file = 'right_putamen.nii.gz'

    right_thalamus = Node(interface=fsl.ExtractROI(), name = 'right_thalamus')
    right_thalamus.inputs.t_min  = 13
    right_thalamus.inputs.t_size = 1
    right_thalamus.inputs.roi_file = 'right_thalamus.nii.gz'

    midbrain = Node(interface=fsl.ExtractROI(), name = 'midbrain')
    midbrain.inputs.t_min  = 14
    midbrain.inputs.t_size = 1
    midbrain.inputs.roi_file = 'midbrain.nii.gz'

    flow.connect( inputnode  ,   'func_first'   ,   left_nacc,       'in_file'     )
    flow.connect( inputnode  ,   'func_first'   ,   left_amygdala,   'in_file'     )
    flow.connect( inputnode  ,   'func_first'   ,   left_caudate,    'in_file'     )
    flow.connect( inputnode  ,   'func_first'   ,   left_hipoocampus,'in_file'     )
    flow.connect( inputnode  ,   'func_first'   ,   left_pallidum,   'in_file'     )
    flow.connect( inputnode  ,   'func_first'   ,   left_putamen,    'in_file'     )
    flow.connect( inputnode  ,   'func_first'   ,   left_thalamus,   'in_file'     )

    flow.connect( inputnode  ,   'func_first'   ,   right_nacc,       'in_file'     )
    flow.connect( inputnode  ,   'func_first'   ,   right_amygdala,   'in_file'     )
    flow.connect( inputnode  ,   'func_first'   ,   right_caudate,    'in_file'     )
    flow.connect( inputnode  ,   'func_first'   ,   right_hipoocampus,'in_file'     )
    flow.connect( inputnode  ,   'func_first'   ,   right_pallidum,   'in_file'     )
    flow.connect( inputnode  ,   'func_first'   ,   right_putamen,    'in_file'     )
    flow.connect( inputnode  ,   'func_first'   ,   right_thalamus,   'in_file'     )
    flow.connect( inputnode  ,   'func_first'   ,   midbrain,         'in_file'     )

    flow.connect( left_nacc        ,   'roi_file'   ,outputnode   ,   'left_nacc'       )
    flow.connect( left_amygdala    ,   'roi_file'   ,outputnode   ,   'left_amygdala'   )
    flow.connect( left_caudate     ,   'roi_file'   ,outputnode   ,   'left_caudate'    )
    flow.connect( left_hipoocampus ,   'roi_file'   ,outputnode   ,   'left_hipoocampus')
    flow.connect( left_pallidum    ,   'roi_file'   ,outputnode   ,   'left_pallidum')
    flow.connect( left_putamen     ,   'roi_file'   ,outputnode   ,   'left_putamen'    )
    flow.connect( left_thalamus    ,   'roi_file'   ,outputnode   ,   'left_thalamus'   )
    flow.connect( right_nacc       ,   'roi_file'   ,outputnode   ,   'right_nacc'       )
    flow.connect( right_amygdala   ,   'roi_file'   ,outputnode   ,   'right_amygdala'   )
    flow.connect( right_caudate    ,   'roi_file'   ,outputnode   ,   'right_caudate'    )
    flow.connect( right_hipoocampus,   'roi_file'   ,outputnode   ,   'right_hipoocampus')
    flow.connect( right_pallidum   ,   'roi_file'   ,outputnode   ,   'right_pallidum')
    flow.connect( right_putamen    ,   'roi_file'   ,outputnode   ,   'right_putamen'    )
    flow.connect( right_thalamus   ,   'roi_file'   ,outputnode   ,   'right_thalamus'   )
    flow.connect( midbrain         ,   'roi_file'   ,outputnode   ,   'midbrain'         )


    # add images together
    right_striatum = Node(interface=fsl.MultiImageMaths(), name = 'right_striatum')
    right_striatum.inputs.op_string = '-add %s -add %s -bin'
    right_striatum.inputs.out_file  = 'right_striatum.nii.gz'
    list_R_str = Node(util.Function(input_names = ['file_1', 'file_2'],
                                    output_names= ['list'],
                                    function    = return_list),
                                    name        = 'list_str_r')

    flow.connect( right_pallidum     ,   'roi_file'   ,list_R_str       ,   'file_1'         )
    flow.connect( right_putamen      ,   'roi_file'   ,list_R_str       ,   'file_2'         )
    flow.connect( right_caudate      ,   'roi_file'   ,right_striatum   ,   'in_file'        )
    flow.connect( list_R_str         ,   'list'       ,right_striatum   ,   'operand_files'  )
    flow.connect( right_striatum     ,   'out_file'   ,outputnode       ,   'right_striatum' )


    left_striatum = Node(interface=fsl.MultiImageMaths(), name = 'left_striatum')
    left_striatum.inputs.op_string = '-add %s -add %s -bin'  # binarize, matching the right striatum mask
    left_striatum.inputs.out_file  = 'left_striatum.nii.gz'
    list_L_str =  list_R_str.clone('list_str_l')

    flow.connect( left_pallidum     ,   'roi_file'   ,list_L_str       ,   'file_1'         )
    flow.connect( left_putamen      ,   'roi_file'   ,list_L_str       ,   'file_2'         )
    flow.connect( left_caudate      ,   'roi_file'   ,left_striatum    ,   'in_file'        )
    flow.connect( list_L_str        ,   'list'       ,left_striatum    ,   'operand_files'  )
    flow.connect( left_striatum     ,   'out_file'   ,outputnode       ,   'left_striatum'  )


    return flow
Exemplo n.º 18
0
                      interface=Function(
                          input_names=['zstat', 'volume', 'dlh'],
                          output_names=['threshold_file', 'masked_zstat'],
                          function=cluster_zstats))

#==========================================================================================================================================================
#Move the images to MNI space with precalculated transformations
unthresh_2_MNI = Node(ants.ApplyTransforms(), name='unthresh_2_MNI')
unthresh_2_MNI.inputs.reference_image = MNI_1mm
unthresh_2_MNI.inputs.transforms = '/Volumes/Amr_1TB/NARPS/narps_to_MNI_1mm_Composite.h5'
unthresh_2_MNI.inputs.dimension = 3
unthresh_2_MNI.inputs.output_image = 'unthreshold_file_MNI.nii.gz'

#==========================================================================================================================================================
# threshold the maps at 3.1 to make them ready for submission
thresh_2_MNI = unthresh_2_MNI.clone(name='thresh_2_MNI')

#==========================================================================================================================================================
# overlay thresh_zstat1

overlay_zstat = Node(fsl.Overlay(), name='overlay')
overlay_zstat.inputs.auto_thresh_bg = True
overlay_zstat.inputs.stat_thresh = (3.1, 10)  # threshold positive and negative
overlay_zstat.inputs.transparency = True
overlay_zstat.inputs.out_file = 'rendered_thresh_zstat.nii.gz'
overlay_zstat.inputs.show_negative_stats = True
overlay_zstat.inputs.background_image = MNI_1mm

#==========================================================================================================================================================
# generate pics thresh_zstat1
Exemplo n.º 19
0
def learning_predict_data_wf(working_dir,
                             ds_dir,
                             trained_model_dir,
                             in_data_name_list,
                             subjects_selection_crit_dict,
                             subjects_selection_crit_names_list,
                             aggregated_subjects_dir,
                             target_list,
                             trained_model_template,
                             use_n_procs,
                             plugin_name,
                             confound_regression=[False, True]):
    # trained_model_template = {
    # 'trained_model': 'learning_out/group_learning_prepare_data/{ana_stream}trained_model/' +
    #                  '_multimodal_in_data_name_{multimodal_in_data_name}/_selection_criterium_bothSexes_neuH/' +
    #                  '_target_name_{target_name}/trained_model.pkl'}

    import os
    from nipype import config
    from nipype.pipeline.engine import Node, Workflow
    import nipype.interfaces.utility as util
    import nipype.interfaces.io as nio
    from itertools import chain
    from learning_utils import aggregate_multimodal_metrics_fct, run_prediction_from_trained_model_fct, \
        select_subjects_fct, select_multimodal_X_fct
    import pandas as pd

    ###############################################################################################################
    # GENERAL SETTINGS

    wf = Workflow(name='learning_predict_data_from_trained_model_wf')
    wf.base_dir = os.path.join(working_dir)

    nipype_cfg = dict(logging=dict(workflow_level='DEBUG'),
                      execution={
                          'stop_on_first_crash': False,
                          'remove_unnecessary_outputs': False,
                          'job_finished_timeout': 120
                      })
    config.update_config(nipype_cfg)
    wf.config['execution']['crashdump_dir'] = os.path.join(
        working_dir, 'crash')

    ds = Node(nio.DataSink(), name='ds')
    ds.inputs.base_directory = os.path.join(ds_dir,
                                            'group_learning_prepare_data')

    ds.inputs.regexp_substitutions = [
        # ('subject_id_', ''),
        ('_parcellation_', ''),
        ('_bp_freqs_', 'bp_'),
        ('_extraction_method_', ''),
        ('_subject_id_[A0-9]*/', '')
    ]
    ds_pdf = Node(nio.DataSink(), name='ds_pdf')
    ds_pdf.inputs.base_directory = os.path.join(ds_dir, 'pdfs')
    ds_pdf.inputs.parameterization = False

    ###############################################################################################################
    # ensure in_data_name_list is list of lists
    in_data_name_list = [
        i if type(i) == list else [i] for i in in_data_name_list
    ]
    in_data_name_list_unique = list(set(
        chain.from_iterable(in_data_name_list)))

    ###############################################################################################################
    # SET ITERATORS

    in_data_name_infosource = Node(
        util.IdentityInterface(fields=['in_data_name']),
        name='in_data_name_infosource')
    in_data_name_infosource.iterables = ('in_data_name',
                                         in_data_name_list_unique)

    multimodal_in_data_name_infosource = Node(
        util.IdentityInterface(fields=['multimodal_in_data_name']),
        name='multimodal_in_data_name_infosource')
    multimodal_in_data_name_infosource.iterables = ('multimodal_in_data_name',
                                                    in_data_name_list)

    subject_selection_infosource = Node(
        util.IdentityInterface(fields=['selection_criterium']),
        name='subject_selection_infosource')
    subject_selection_infosource.iterables = (
        'selection_criterium', subjects_selection_crit_names_list)

    target_infosource = Node(util.IdentityInterface(fields=['target_name']),
                             name='target_infosource')
    target_infosource.iterables = ('target_name', target_list)

    ###############################################################################################################
    # GET INFO AND SELECT FILES
    df_all_subjects_pickle_file = os.path.join(
        aggregated_subjects_dir, 'df_all_subjects_pickle_file/df_all.pkl')
    df = pd.read_pickle(df_all_subjects_pickle_file)

    # build lookup dict for unimodal data
    X_file_template = 'X_file/_in_data_name_{in_data_name}/vectorized_aggregated_data.npy'
    info_file_template = 'unimodal_backprojection_info_file/_in_data_name_{in_data_name}/unimodal_backprojection_info.pkl'
    unimodal_lookup_dict = {}
    for k in in_data_name_list_unique:
        unimodal_lookup_dict[k] = {
            'X_file':
            os.path.join(aggregated_subjects_dir,
                         X_file_template.format(in_data_name=k)),
            'unimodal_backprojection_info_file':
            os.path.join(aggregated_subjects_dir,
                         info_file_template.format(in_data_name=k))
        }

    ###############################################################################################################
    # AGGREGATE MULTIMODAL METRICS
    # stack single modality arrays horizontally
    aggregate_multimodal_metrics = Node(util.Function(
        input_names=['multimodal_list', 'unimodal_lookup_dict'],
        output_names=[
            'X_multimodal_file', 'multimodal_backprojection_info',
            'multimodal_name'
        ],
        function=aggregate_multimodal_metrics_fct),
                                        name='aggregate_multimodal_metrics')
    wf.connect(multimodal_in_data_name_infosource, 'multimodal_in_data_name',
               aggregate_multimodal_metrics, 'multimodal_list')
    aggregate_multimodal_metrics.inputs.unimodal_lookup_dict = unimodal_lookup_dict

    ###############################################################################################################
    # GET INDEXER FOR SUBJECTS OF INTEREST (as defined by selection criterium)
    select_subjects = Node(util.Function(input_names=[
        'df_all_subjects_pickle_file', 'subjects_selection_crit_dict',
        'selection_criterium'
    ],
                                         output_names=[
                                             'df_use_file',
                                             'df_use_pickle_file',
                                             'subjects_selection_index'
                                         ],
                                         function=select_subjects_fct),
                           name='select_subjects')

    select_subjects.inputs.df_all_subjects_pickle_file = df_all_subjects_pickle_file
    select_subjects.inputs.subjects_selection_crit_dict = subjects_selection_crit_dict
    wf.connect(subject_selection_infosource, 'selection_criterium',
               select_subjects, 'selection_criterium')

    ###############################################################################################################
    # SELECT MULTIMODAL X
    # select subjects (rows) from multimodal X according to the indexer
    select_multimodal_X = Node(util.Function(
        input_names=[
            'X_multimodal_file', 'subjects_selection_index',
            'selection_criterium'
        ],
        output_names=['X_multimodal_selected_file'],
        function=select_multimodal_X_fct),
                               name='select_multimodal_X')
    wf.connect(aggregate_multimodal_metrics, 'X_multimodal_file',
               select_multimodal_X, 'X_multimodal_file')
    wf.connect(select_subjects, 'subjects_selection_index',
               select_multimodal_X, 'subjects_selection_index')

    ###############################################################################################################
    # RUN PREDICTION
    #
    prediction_node_dict = {}
    select_trained_model_node_dict = {}

    prediction = Node(util.Function(
        input_names=[
            'trained_model_file', 'X_file', 'target_name',
            'selection_criterium', 'df_file', 'data_str', 'regress_confounds'
        ],
        output_names=[
            'scatter_file', 'brain_age_scatter_file', 'df_use_file',
            'df_res_out_file'
        ],
        function=run_prediction_from_trained_model_fct),
                      name='prediction')

    def rep(s):
        return s.replace('__', '.')

    select_trained_model = Node(nio.SelectFiles(trained_model_template),
                                'select_trained_model')

    i = 0

    for reg in confound_regression:
        the_out_node_str = 'single_source_model_reg_%s_' % reg

        select_trained_model_node_dict[i] = select_trained_model.clone(
            the_out_node_str + 'select_trained_model')
        select_trained_model_node_dict[
            i].inputs.base_directory = trained_model_dir
        select_trained_model_node_dict[i].inputs.ana_stream = the_out_node_str

        wf.connect(target_infosource, 'target_name',
                   select_trained_model_node_dict[i], 'target_name')
        wf.connect(aggregate_multimodal_metrics, ('multimodal_name', rep),
                   select_trained_model_node_dict[i],
                   'multimodal_in_data_name')

        prediction_node_dict[i] = prediction.clone(the_out_node_str)
        the_in_node = prediction_node_dict[i]
        the_in_node.inputs.regress_confounds = reg

        wf.connect(select_trained_model_node_dict[i], 'trained_model',
                   the_in_node, 'trained_model_file')
        wf.connect(select_multimodal_X, 'X_multimodal_selected_file',
                   the_in_node, 'X_file')
        wf.connect(target_infosource, 'target_name', the_in_node,
                   'target_name')
        wf.connect(subject_selection_infosource, 'selection_criterium',
                   the_in_node, 'selection_criterium')
        wf.connect(select_subjects, 'df_use_pickle_file', the_in_node,
                   'df_file')
        wf.connect(aggregate_multimodal_metrics, 'multimodal_name',
                   the_in_node, 'data_str')

        wf.connect(the_in_node, 'scatter_file', ds_pdf,
                   the_out_node_str + 'scatter')
        wf.connect(the_in_node, 'brain_age_scatter_file', ds_pdf,
                   the_out_node_str + 'brain_age_scatter')
        wf.connect(the_in_node, 'df_use_file', ds_pdf,
                   the_out_node_str + 'predicted')
        wf.connect(the_in_node, 'df_res_out_file', ds_pdf,
                   the_out_node_str + 'results_error')

        i += 1

    ###############################################################################################################
    #  RUN WF
    wf.write_graph(dotfilename=wf.name, graph2use='colored',
                   format='pdf')  # alternative: graph2use='hierarchical'
    wf.write_graph(dotfilename=wf.name, graph2use='orig', format='pdf')
    wf.write_graph(dotfilename=wf.name, graph2use='flat', format='pdf')

    if plugin_name == 'CondorDAGMan':
        wf.run(plugin=plugin_name)
    if plugin_name == 'MultiProc':
        wf.run(plugin=plugin_name, plugin_args={'n_procs': use_n_procs})
Exemplo n.º 20
0
def create_similarity_pipeline(name):

    similarity=Workflow(name=name)

    # inputnode
    inputnode=Node(util.IdentityInterface(fields=['anat_brain',
                                                  'mask',
                                                  'lin_mean',
                                                  'nonlin_mean',
                                                  'fmap_mean',
                                                  'topup_mean',
                                                  'filename'
                                                  ]),
                   name='inputnode')
    
    
    # outputnode                                     
    outputnode=Node(util.IdentityInterface(fields=['textfile']),
                    name='outputnode')

    
    # resample all means to make sure they have the same resolution as the reference anatomy
    resamp_mask = Node(afni.Resample(outputtype='NIFTI_GZ'), name='resample_mask')
    resamp_lin = resamp_mask.clone(name='resample_lin')
    resamp_nonlin = resamp_mask.clone(name='resample_nonlin')
    resamp_fmap = resamp_mask.clone(name='resample_fmap')
    resamp_topup = resamp_mask.clone(name='resample_topup')
    
    similarity.connect([(inputnode, resamp_mask, [('mask', 'in_file'),
                                                 ('anat_brain', 'master')]),
                        (inputnode, resamp_lin, [('lin_mean', 'in_file'),
                                                 ('anat_brain', 'master')]),
                        (inputnode, resamp_nonlin, [('nonlin_mean', 'in_file'),
                                                 ('anat_brain', 'master')]),
                        (inputnode, resamp_fmap, [('fmap_mean', 'in_file'),
                                                 ('anat_brain', 'master')]),
                        (inputnode, resamp_topup, [('topup_mean', 'in_file'),
                                                 ('anat_brain', 'master')]),
                        ])
    
    # calculate similarity (all available metrics) between each method's mean and the reference anatomy
    lin_sim = MapNode(interface=nutil.Similarity(),
                      name='similarity_lin',
                      iterfield=['metric'])
    lin_sim.inputs.metric = ['mi', 'nmi', 'cc', 'cr', 'crl1']

    nonlin_sim = lin_sim.clone(name='similarity_nonlin')
    nonlin_sim.inputs.metric = ['mi', 'nmi', 'cc', 'cr', 'crl1']
    fmap_sim = lin_sim.clone(name='similarity_fmap')
    fmap_sim.inputs.metric = ['mi', 'nmi', 'cc', 'cr', 'crl1']
    topup_sim = lin_sim.clone(name='similarity_topup')
    topup_sim.inputs.metric = ['mi', 'nmi', 'cc', 'cr', 'crl1']
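    # MapNode note: with iterfield=['metric'] the Similarity interface runs once
    # per entry of the metric list, and its 'similarity' output is a list in the
    # same order (mi, nmi, cc, cr, crl1). The clones inherit lin_sim's inputs,
    # so re-setting the metric list above is redundant but harmless.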
    
    similarity.connect([(inputnode, lin_sim, [('anat_brain', 'volume1')]),
                        (resamp_lin, lin_sim, [('out_file', 'volume2')]),
                        (resamp_mask, lin_sim, [('out_file', 'mask1'),
                                                ('out_file', 'mask2')]),
                        (inputnode, nonlin_sim, [('anat_brain', 'volume1')]),
                        (resamp_nonlin, nonlin_sim, [('out_file', 'volume2')]),
                        (resamp_mask, nonlin_sim, [('out_file', 'mask1'),
                                                   ('out_file', 'mask2')]),
                        (inputnode, fmap_sim, [('anat_brain', 'volume1')]),
                        (resamp_fmap, fmap_sim, [('out_file', 'volume2')]),
                        (resamp_mask, fmap_sim, [('out_file', 'mask1'),
                                                 ('out_file', 'mask2')]),
                        (inputnode, topup_sim, [('anat_brain', 'volume1')]),
                        (resamp_topup, topup_sim, [('out_file', 'volume2')]),
                        (resamp_mask, topup_sim, [('out_file', 'mask1'),
                                                  ('out_file', 'mask2')])
                        ])
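    # Every *_sim node receives the resampled brain mask as both mask1 and
    # mask2, so all similarity metrics are evaluated within brain voxels only.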
    
    
    # write values to one text file per subject
    def write_text(lin_metrics, nonlin_metrics, fmap_metrics, topup_metrics, filename):
        import numpy as np
        import os
        # stack each method's list of metric values into a column vector
        lin_array = np.array(lin_metrics).reshape(-1, 1)
        nonlin_array = np.array(nonlin_metrics).reshape(-1, 1)
        fmap_array = np.array(fmap_metrics).reshape(-1, 1)
        topup_array = np.array(topup_metrics).reshape(-1, 1)
        # one row per metric (mi, nmi, cc, cr, crl1), one column per method
        metrics = np.concatenate((lin_array, nonlin_array, fmap_array, topup_array), axis=1)
        np.savetxt(filename, metrics, delimiter=' ', fmt='%f')
        return os.path.abspath(filename)
    
    
    write_txt = Node(interface=Function(input_names=['lin_metrics', 'nonlin_metrics',
                                                     'fmap_metrics', 'topup_metrics',
                                                     'filename'],
                                        output_names=['txtfile'],
                                        function=write_text),
                     name='write_file')
    
    similarity.connect([(inputnode, write_txt, [('filename', 'filename')]),
                        (lin_sim, write_txt, [('similarity', 'lin_metrics')]),
                        (nonlin_sim, write_txt, [('similarity', 'nonlin_metrics')]),
                        (fmap_sim, write_txt, [('similarity', 'fmap_metrics')]),
                        (topup_sim, write_txt, [('similarity', 'topup_metrics')]),
                        (write_txt, outputnode, [('txtfile', 'textfile')])
                        ])
    
    
    return similarity
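# A minimal, hypothetical usage sketch (not part of the original snippet);
# the base_dir and file paths below are placeholders:
#
# sim = create_similarity_pipeline(name='similarity_qc')
# sim.base_dir = '/tmp/similarity_wf'
# sim.inputs.inputnode.anat_brain = 'anat_brain.nii.gz'
# sim.inputs.inputnode.mask = 'brain_mask.nii.gz'
# sim.inputs.inputnode.lin_mean = 'lin_mean.nii.gz'
# sim.inputs.inputnode.nonlin_mean = 'nonlin_mean.nii.gz'
# sim.inputs.inputnode.fmap_mean = 'fmap_mean.nii.gz'
# sim.inputs.inputnode.topup_mean = 'topup_mean.nii.gz'
# sim.inputs.inputnode.filename = 'similarity_metrics.txt'
# sim.run()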
FA_to_WAX_Temp.inputs.output_warped_image = True
FA_to_WAX_Temp.inputs.float = True

#>>>>>>>>>>>>>>>>>>>>>>>>>>>MD

antsApplyMD_WAX = Node(ants.ApplyTransforms(), name='antsApplyMD_WAX')
antsApplyMD_WAX.inputs.dimension = 3
antsApplyMD_WAX.inputs.input_image_type = 3
antsApplyMD_WAX.inputs.num_threads = 1
antsApplyMD_WAX.inputs.float = True
antsApplyMD_WAX.inputs.output_image = 'MD_{subject_id}.nii'
antsApplyMD_WAX.inputs.reference_image = Wax_FA_Template

#>>>>>>>>>>>>>>>>>>>>>>>>>>>AD

antsApplyAD_WAX = antsApplyMD_WAX.clone(name='antsApplyAD_WAX')
antsApplyAD_WAX.inputs.output_image = 'AD_{subject_id}.nii'

#>>>>>>>>>>>>>>>>>>>>>>>>>>>RD

antsApplyRD_WAX = antsApplyMD_WAX.clone(name='antsApplyRD_WAX')
antsApplyRD_WAX.inputs.output_image = 'RD_{subject_id}.nii'
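# Hedged wiring sketch (not shown in this fragment): the scalar maps are
# typically pushed through the transforms estimated by FA_to_WAX_Temp; the
# workflow and source-node names below are hypothetical:
#
# dti_wf.connect(FA_to_WAX_Temp, 'forward_transforms', antsApplyMD_WAX, 'transforms')
# dti_wf.connect(md_source, 'out_file', antsApplyMD_WAX, 'input_image')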


#----------------------------------------------------------------------------------------------------
#----------------------------------------------------------------------------------------------------
#----------------------------------------------------------------------------------------------------
# Register to Study template second, just to have both for purposes of comparison

#>>>>>>>>>>>>>>>>>>>>>>>>>>>FA
FA_to_Study_Temp = Node(ants.Registration(), name='FA_To_Study_Template')
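# The listing breaks off here; a hedged sketch of how this node is typically
# configured, mirroring FA_to_WAX_Temp above (the template variable and
# parameter values are illustrative, not from the original):
#
# FA_to_Study_Temp.inputs.fixed_image = Study_FA_Template
# FA_to_Study_Temp.inputs.transforms = ['Rigid', 'Affine', 'SyN']
# FA_to_Study_Temp.inputs.output_warped_image = True
# FA_to_Study_Temp.inputs.float = True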
                 (demean, zstandardize, [('out_file', 'in_file')]),
                 (standev, zstandardize, [('out_stat', 'operand_value')]),
                 ])
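# The connections above close a voxelwise z-standardisation step: the demeaned
# time series from 'demean' is divided by the temporal standard deviation from
# 'standev', i.e. z = (x - mean) / std.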

### MVPA volume and surface pipeline

# Concatenate Volume node - concatenates all session files into one file
concatVol = Node(Concatenate(), name='concatVol')

# Create volume pipeline
volume_flow = Workflow(name='volume_flow')

## create the surface workflow ##

# Copy the concatenate node from the volume pipeline
concatSurfLH = concatVol.clone('concatSurfLH')
concatSurfRH = concatVol.clone('concatSurfRH')

# Convert NIfTI into mgz
mriconvertSurfLH = Node(MRIConvert(out_type='mgz'), name='mriconvertSurfLH')
mriconvertSurfRH = Node(MRIConvert(out_type='mgz'), name='mriconvertSurfRH')

# Sample Left node - samples the volume data onto the surface representation
samplerLH = Node(SampleToSurface(hemi='lh',
                                 interp_method='trilinear',
                                 sampling_method='average',
                                 sampling_range=(0.1, 0.9, 0.1),
                                 sampling_units='frac'),
                 name='samplerLH')
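# Sampling note: sampling_range=(0.1, 0.9, 0.1) with sampling_units='frac'
# samples the volume at 10%-90% of cortical thickness in steps of 10% and
# averages those samples (sampling_method='average') onto each vertex.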

# Sample Right node - does the same as the node above,