Example #1
def create_workflow_hrfpattern_spm():

    # GLM
    design = Node(interface=spm_design(), name='design_glm')
    design.inputs.timing_units = 'secs'
    design.inputs.interscan_interval = .85
    design.inputs.bases = {'hrf': {'derivs': [0, 0]}}

    estimate = Node(interface=EstimateModel(), name="estimate")
    estimate.inputs.estimation_method = {'Classical': 1}

    contrastestimate = Node(interface=EstimateContrast(), name="contrast")
    contrastestimate.inputs.contrasts = [('Visual', 'T', ['1'], [1])]

    w = Workflow(name='hrfpattern_spm')
    w.connect(input_node, 'bold', model, 'functional_runs')
    w.connect(input_node, 'events', model, 'bids_event_file')
    w.connect(model, 'session_info', design, 'session_info')
    w.connect(design, 'spm_mat_file', estimate, 'spm_mat_file')
    w.connect(estimate, 'spm_mat_file', contrastestimate, 'spm_mat_file')
    w.connect(estimate, 'beta_images', contrastestimate, 'beta_images')
    w.connect(estimate, 'residual_image', contrastestimate, 'residual_image')
    w.connect(contrastestimate, 'spmT_images', output_node, 'T_image')
    return w
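The excerpt above omits the nodes it wires together (input_node, model, output_node), and spm_design presumably aliases SPM's Level1Design interface. A minimal sketch of those assumed pieces, with illustrative parameter values:

from nipype import Node, Workflow
from nipype.interfaces.utility import IdentityInterface
from nipype.interfaces.spm import Level1Design as spm_design, EstimateModel, EstimateContrast
from nipype.algorithms.modelgen import SpecifySPMModel

# Hypothetical definitions of the nodes referenced but not shown above
input_node = Node(IdentityInterface(fields=['bold', 'events']), name='input_node')
output_node = Node(IdentityInterface(fields=['T_image']), name='output_node')
model = Node(SpecifySPMModel(input_units='secs',
                             output_units='secs',
                             time_repetition=0.85,
                             high_pass_filter_cutoff=128), name='model')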
Example #2
def prepare_indT_cs():
    outdirs = [
        '/home/peter/Desktop/prepare/rest/output/sec_level/two_sample_con_vs_stroke/alff_analysis/',
        '/home/peter/Desktop/prepare/rest/output/sec_level/two_sample_con_vs_stroke/falff_analysis/'
    ]

    for n, outdir in enumerate(outdirs):

        stroke_hdir = '/home/peter/Desktop/prepare/rest/output/'
        all_stroke_files = glob.glob(stroke_hdir + '/*/f_alff/' + maps[n])
        stroke_files = [x for x in all_stroke_files if '12months' not in x]

        healthy_hdir = '/home/peter/Desktop/Connect/rest/output/'
        all_healthy_files = glob.glob(healthy_hdir + '/D_H*/f_alff/' + maps[n])
        healthy_files = [x for x in all_healthy_files if 'P2' not in x]

        ttest = Node(TwoSampleTTestDesign(), name='TwoSampleT')
        ttest.inputs.group1_files = healthy_files
        ttest.inputs.group2_files = stroke_files

        modelEst = Node(EstimateModel(), name='EstimateModel')
        modelEst.inputs.estimation_method = {'Classical': 1}

        conEst = Node(EstimateContrast(), name='EstimateContrasts')

        con_1 = ('Controls', 'T', ['Group_{1}', 'Group_{2}'], [1.0, 0.0])
        con_2 = ('Patients', 'T', ['Group_{1}', 'Group_{2}'], [0.0, 1.0])
        con_3 = ('Controls>Patients', 'T', ['Group_{1}', 'Group_{2}'], [1.0, -1.0])
        con_4 = ('Patients>Controls', 'T', ['Group_{1}', 'Group_{2}'], [-1.0, 1.0])

        contrasts = [con_1, con_2, con_3, con_4]

        conEst.inputs.contrasts = contrasts
        conEst.inputs.group_contrast = True

        l2analysis = Workflow(name='l2analysis')
        l2analysis.base_dir = outdir

        l2analysis.connect([
            (ttest, modelEst, [('spm_mat_file', 'spm_mat_file')]),
            (modelEst, conEst, [('spm_mat_file', 'spm_mat_file'),
                                ('beta_images', 'beta_images'),
                                ('residual_image', 'residual_image')]),
        ])

        l2analysis.write_graph(graph2use='colored')
        l2analysis.run('MultiProc', plugin_args={'n_procs': 1})
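This function relies on module-level context that the excerpt does not show, in particular the imports and a maps list indexed alongside outdirs. A hedged sketch of that context (the map filenames are placeholders):

import glob
from nipype import Node, Workflow
from nipype.interfaces.spm import TwoSampleTTestDesign, EstimateModel, EstimateContrast

# One (f)ALFF map filename per entry in outdirs; these names are illustrative only
maps = ['alff_map.nii', 'falff_map.nii']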
Example #3
selectderivs = Node(SelectFiles(templates, sort_filelist=True),
                    name='selectderivs')
#selectderivs.inputs.sub_id = subs

# One Sample T-Test Design - creates one sample T-Test Design
onesamplettestdes = Node(
    OneSampleTTestDesign(),
    #overwrite=True,
    name="onesampttestdes")
onesamplettestdes.inputs.explicit_mask_file = mask
# EstimateModel - estimate the parameters of the model
# Even for second level it should be 'Classical': 1.
level2estimate = Node(EstimateModel(estimation_method={'Classical': 1}),
                      name="level2estimate")
# EstimateContrast - estimates simple group contrast
level2conestimate = Node(EstimateContrast(group_contrast=True),
                         name="level2conestimate")
cont1 = ['Group', 'T', ['mean'], [1]]
level2conestimate.inputs.contrasts = [cont1]

## Create the 2nd level pipeline
secondlev = Workflow(name='secondlev', base_dir=out_dir + '/tmp')
secondlev.config['execution']['crashdump_dir'] = out_dir + '/tmp/crash_files'

secondlev.connect([
    (infosource, selectderivs, [('contrast_id', 'contrast_id')]),
    (selectderivs, onesamplettestdes, [('cons', 'in_files')]),
    (onesamplettestdes, level2estimate, [('spm_mat_file', 'spm_mat_file')]),
    (level2estimate, level2conestimate, [('spm_mat_file', 'spm_mat_file'),
                                         ('beta_images', 'beta_images'),
                                         ('residual_image', 'residual_image')]),
])
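This excerpt likewise assumes several objects defined elsewhere (templates, mask, out_dir, infosource). A hedged sketch of what they might look like; paths and contrast ids are placeholders:

from nipype import Node, Workflow
from nipype.interfaces.io import SelectFiles
from nipype.interfaces.utility import IdentityInterface
from nipype.interfaces.spm import OneSampleTTestDesign, EstimateModel, EstimateContrast

out_dir = '/data/second_level'             # hypothetical output root
mask = '/data/group_mask.nii'              # hypothetical explicit mask
infosource = Node(IdentityInterface(fields=['contrast_id']), name='infosource')
infosource.iterables = [('contrast_id', ['con_0001', 'con_0002'])]
templates = {'cons': '/data/first_level/sub-*/{contrast_id}.nii'}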
Example #4
def batch_paramatric_GLM(nii_root_dir, sub_num_list, total_session_num,
                         all_sub_dataframe, params_name, contrast_list,
                         cache_folder, result_folder, parallel_cores):

    from nipype import Node, Workflow, Function
    from nipype.interfaces.spm import Level1Design, EstimateModel, EstimateContrast
    from nipype.algorithms.modelgen import SpecifySPMModel
    from nipype.interfaces.utility import IdentityInterface
    from nipype import DataSink

    # Define the helper functions

    def nii_selector(root_dir,
                     sub_num,
                     session_num,
                     all_sub_dataframe,
                     data_type="Smooth_8mm"):
        import os
        import glob
        session_list = ["session" + str(i) for i in range(1, session_num + 1)]
        sub_name = "sub" + str(sub_num)
        # print(file_path)
        nii_list = []
        for s in session_list:
            file_path = os.path.join(root_dir, sub_name, data_type, s)
            nii_list.append(glob.glob(file_path + "/*.nii"))
        single_sub_data = all_sub_dataframe[all_sub_dataframe.Subject_num ==
                                            sub_num]
        return (nii_list, single_sub_data, sub_name)

    def condition_generator(single_sub_data, params_name, duration=2):
        from nipype.interfaces.base import Bunch
        run_num = set(single_sub_data.run)
        subject_info = []
        for i in run_num:
            tmp_table = single_sub_data[single_sub_data.run == i]
            tmp_onset = tmp_table.onset.values.tolist()

            pmod_names = []
            pmod_params = []
            pmod_poly = []
            for param in params_name:
                pmod_params.append(tmp_table[param].values.tolist())
                pmod_names.append(param)
                pmod_poly.append(1)

            tmp_Bunch = Bunch(conditions=["trial_onset_run" + str(i)],
                              onsets=[tmp_onset],
                              durations=[[duration]],
                              pmod=[
                                  Bunch(name=pmod_names,
                                        poly=pmod_poly,
                                        param=pmod_params)
                              ])
            subject_info.append(tmp_Bunch)

        return subject_info

    # Define each Nodes in the workflow

    NiiSelector = Node(Function(
        input_names=[
            "root_dir", "sub_num", "session_num", "all_sub_dataframe",
            "data_type"
        ],
        output_names=["nii_list", "single_sub_data", "sub_name"],
        function=nii_selector),
                       name="NiiSelector")

    ConditionGenerator = Node(Function(
        input_names=["single_sub_data", "params_name", "duration"],
        output_names=["subject_info"],
        function=condition_generator),
                              name="ConditionGenerator")

    glm_input = Node(IdentityInterface(
        fields=['nii_list', 'single_sub_data', 'params_name', 'contrast_list'],
        mandatory_inputs=True),
                     name="glm_input")

    # SpecifyModel - Generates SPM-specific Model
    modelspec = Node(SpecifySPMModel(concatenate_runs=False,
                                     input_units='scans',
                                     output_units='scans',
                                     time_repetition=2,
                                     high_pass_filter_cutoff=128),
                     name="modelspec")

    # Level1Design - Generates an SPM design matrix
    level1design = Node(Level1Design(bases={'hrf': {
        'derivs': [0, 0]
    }},
                                     timing_units='scans',
                                     interscan_interval=2),
                        name="level1design")

    level1estimate = Node(EstimateModel(estimation_method={'Classical': 1}),
                          name="level1estimate")

    level1conest = Node(EstimateContrast(), name="level1conest")

    OutputNode = Node(DataSink(), name="OutputNode")

    # Define the attributes of those nodes

    NiiSelector.inputs.root_dir = nii_root_dir
    NiiSelector.iterables = ("sub_num", sub_num_list)
    NiiSelector.inputs.session_num = total_session_num
    NiiSelector.inputs.data_type = "Smooth_8mm"
    NiiSelector.inputs.all_sub_dataframe = all_sub_dataframe

    glm_input.inputs.params_name = params_name
    glm_input.inputs.contrast_list = contrast_list

    OutputNode.inputs.base_directory = result_folder

    # Define the workflows

    single_sub_GLM_wf = Workflow(name='single_sub_GLM_wf')
    single_sub_GLM_wf.connect([
        (glm_input, ConditionGenerator, [('single_sub_data',
                                          'single_sub_data'),
                                         ('params_name', 'params_name')]),
        (glm_input, modelspec, [('nii_list', 'functional_runs')]),
        (glm_input, level1conest, [('contrast_list', 'contrasts')]),
        (ConditionGenerator, modelspec, [('subject_info', 'subject_info')]),
        (modelspec, level1design, [('session_info', 'session_info')]),
        (level1design, level1estimate, [('spm_mat_file', 'spm_mat_file')]),
        (level1estimate, level1conest, [('spm_mat_file', 'spm_mat_file'),
                                        ('beta_images', 'beta_images'),
                                        ('residual_image', 'residual_image')])
    ])

    batch_GLM_wf = Workflow(name="batch_GLM_wf", base_dir=cache_folder)
    batch_GLM_wf.connect([(NiiSelector, single_sub_GLM_wf, [
        ('nii_list', 'glm_input.nii_list'),
        ('single_sub_data', 'glm_input.single_sub_data')
    ]), (NiiSelector, OutputNode, [('sub_name', 'container')]),
                          (single_sub_GLM_wf, OutputNode,
                           [('level1conest.spm_mat_file', '1stLevel.@spm_mat'),
                            ('level1conest.spmT_images', '1stLevel.@T'),
                            ('level1conest.con_images', '1stLevel.@con'),
                            ('level1conest.spmF_images', '1stLevel.@F'),
                            ('level1conest.ess_images', '1stLevel.@ess')])])

    # Execute the workflow
    batch_GLM_wf.run(plugin='MultiProc',
                     plugin_args={'n_procs': parallel_cores})
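A hedged sketch of how batch_paramatric_GLM might be called. It assumes a behavioural table with columns Subject_num, run, onset and one column per parametric modulator; every path and value below is a placeholder:

import pandas as pd

events = pd.read_csv('/data/behaviour/all_subjects.csv')      # hypothetical table
contrasts = [('onset_run1', 'T', ['trial_onset_run1'], [1])]  # matches the condition names built above
batch_paramatric_GLM(nii_root_dir='/data/nii',
                     sub_num_list=[1, 2, 3],
                     total_session_num=2,
                     all_sub_dataframe=events,
                     params_name=['rt'],
                     contrast_list=contrasts,
                     cache_folder='/data/cache',
                     result_folder='/data/results',
                     parallel_cores=4)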
Example #5
# The opening of this excerpt is truncated; the first line is reconstructed and the timing_units value is assumed
model_spec = Node(Level1Design(timing_units='secs',
                               interscan_interval=2.0,
                               microtime_resolution=16,
                               microtime_onset=1,
                               bases={'hrf': {
                                   'derivs': [0, 0]
                               }},
                               global_intensity_normalization='none',
                               mask_threshold=0.8,
                               model_serial_correlations='AR(1)',
                               volterra_expansion_order=2),
                  name='model_spec')

est_model = Node(EstimateModel(estimation_method={'Classical': 1}),
                 name='est_model')

est_con = Node(EstimateContrast(), name='est_con')

## CREATE WORKFLOW
# Create a workflow to return the seed nuisance regressors and seed map(s) for a subject
first_level = Workflow(name='first_level')
first_level.base_dir = '/data/mridata/jdeng/tools/first_level/nipype'

# Datasink
substitutions = [('output', '')]
datasink = MapNode(DataSink(parameterization=False,
                            substitutions=substitutions),
                   name="datasink",
                   iterfield=['base_directory', 'container'])


# Helper functions for connections
Example #6
# Opening line reconstructed; the excerpt is truncated here
level1design = Node(Level1Design(),
                    name='level1design')
level1design.inputs.bases = {'hrf': {'derivs': [0, 0]}}
level1design.inputs.timing_units = 'secs'
level1design.inputs.interscan_interval = TR
firstlev.connect([
    (modelspec, level1design, [('session_info', 'session_info')])
])

level1estimate = Node(EstimateModel(),
                      overwrite=False,
                      name='level1estimate')
level1estimate.inputs.estimation_method = {'Classical': 1}
firstlev.connect(level1design, 'spm_mat_file',
                 level1estimate, 'spm_mat_file')

contrast_estimate = Node(EstimateContrast(),
                         overwrite=False,
                         name='contraste_estimate')
contrast_estimate.config = {'execution': {'remove_unnecessary_outputs': False}}
firstlev.connect([
    (level1estimate, contrast_estimate,
     [('spm_mat_file', 'spm_mat_file'),
      ('beta_images', 'beta_images'),
      ('residual_image', 'residual_image')])
])

contrasts = Node(Function(function=con_setup,
                          input_names=['subject_id'],
                          output_names=['contrasts']),
                 name='contrasts')
firstlev.connect([
    # Assumed wiring; the source excerpt is cut off at this point
    (contrasts, contrast_estimate, [('contrasts', 'contrasts')]),
])
Example #7
# Level1Design - Generates an SPM design matrix
level1design = Node(Level1Design(bases={'hrf': {
    'derivs': [0, 0]
}},
                                 timing_units='secs',
                                 interscan_interval=TR,
                                 model_serial_correlations='AR(1)'),
                    name="level1design")

# EstimateModel - estimate the parameters of the model
level1estimate = Node(EstimateModel(estimation_method={'Classical': 1}),
                      name="level1estimate")

# EstimateContrast - estimates contrasts
conestimate = Node(EstimateContrast(), name="conestimate")

# Volume Transformation - transform contrasts into anatomical space
applyVolReg = MapNode(ApplyVolTransform(fs_target=True),
                      name='applyVolReg',
                      iterfield=['source_file'])

# MRIConvert - to gzip output files
mriconvert = MapNode(MRIConvert(out_type='niigz'),
                     name='mriconvert',
                     iterfield=['in_file'])

# Initiation of the 1st-level analysis workflow
l1analysis = Workflow(name='l1analysis')

# Connect up the 1st-level analysis components
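The excerpt stops before the connect calls. A minimal sketch of the wiring implied by the nodes above (the original connections are not shown, so treat this as an assumption):

l1analysis.connect([
    (level1design, level1estimate, [('spm_mat_file', 'spm_mat_file')]),
    (level1estimate, conestimate, [('spm_mat_file', 'spm_mat_file'),
                                   ('beta_images', 'beta_images'),
                                   ('residual_image', 'residual_image')]),
    (conestimate, applyVolReg, [('con_images', 'source_file')]),
    (applyVolReg, mriconvert, [('transformed_file', 'in_file')]),
])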
Example #8
# Level1Design - Generates an SPM design matrix
level1design = Node(Level1Design(bases={'hrf': {
    'derivs': [1, 0]
}},
                                 timing_units='secs',
                                 interscan_interval=TR,
                                 model_serial_correlations='FAST'),
                    name="level1design")

# EstimateModel - estimate the parameters of the model
level1estimate = Node(EstimateModel(estimation_method={'Classical': 1}),
                      name="level1estimate")

# EstimateContrast - estimates contrasts
level1conest = Node(EstimateContrast(), name="level1conest")

# Condition names
condition_names = ['CueA', 'CueB']

# Contrasts
con_01 = ['average', 'T', condition_names, [1 / 2., 1 / 2.]]
con_02 = ['CueA', 'T', condition_names, [1, 0]]
con_03 = ['CueB', 'T', condition_names, [0, 1]]
con_04 = ['CueA > CueB', 'T', condition_names, [1, -1]]
con_05 = ['CueB > CueA', 'T', condition_names, [-1, 1]]

contrast_list = [con_01, con_02, con_03, con_04, con_05]


def subjectinfo(subject_id):
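The subjectinfo definition is cut off here. A minimal sketch of what such a helper typically returns for SpecifySPMModel, using the condition names above but placeholder onsets and durations:

def subjectinfo(subject_id):
    # subject_id would normally be used to look up subject-specific event timings
    from nipype.interfaces.base import Bunch
    return [Bunch(conditions=condition_names,
                  onsets=[[10, 60, 110], [35, 85, 135]],   # illustrative values
                  durations=[[2], [2]])]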
Example #9
def first_level(TR,
                contrast_list,
                subject_list,
                experiment_dir,
                output_dir,
                subjectinfo_func,
                working_dir='workingdir'):
    """define first level model"""
    # SpecifyModel - Generates SPM-specific Model
    modelspec = Node(SpecifySPMModel(concatenate_runs=False,
                                     input_units='secs',
                                     output_units='secs',
                                     time_repetition=TR,
                                     high_pass_filter_cutoff=128),
                     name="modelspec")

    # Level1Design - Generates an SPM design matrix
    level1design = Node(Level1Design(bases={'hrf': {
        'derivs': [0, 0]
    }},
                                     timing_units='secs',
                                     interscan_interval=TR,
                                     model_serial_correlations='FAST'),
                        name="level1design")

    # EstimateModel - estimate the parameters of the model
    level1estimate = Node(EstimateModel(estimation_method={'Classical': 1}),
                          name="level1estimate")

    # EstimateContrast - estimates contrasts
    level1conest = Node(EstimateContrast(), name="level1conest")

    # Get Subject Info - get subject specific condition information
    getsubjectinfo = Node(Function(input_names=['subject_id'],
                                   output_names=['subject_info'],
                                   function=subjectinfo_func),
                          name='getsubjectinfo')

    # Infosource - a function free node to iterate over the list of subject names
    infosource = Node(IdentityInterface(fields=['subject_id', 'contrasts'],
                                        contrasts=contrast_list),
                      name="infosource")
    infosource.iterables = [('subject_id', subject_list)]

    # SelectFiles - to grab the data (alternative to DataGrabber)
    smooth_dir = opj(experiment_dir, 'smooth_nomask', 'preproc')
    templates = {
        'func': opj(smooth_dir, 'sub-{subject_id}', '*run-*_fwhm-8_bold.nii')
    }

    selectfiles = Node(SelectFiles(templates,
                                   base_directory=experiment_dir,
                                   sort_filelist=True),
                       name="selectfiles")

    # Datasink - creates output folder for important outputs
    datasink = Node(DataSink(base_directory=experiment_dir,
                             container=output_dir),
                    name="datasink")

    # Use the following DataSink output substitutions
    substitutions = [('_subject_id_', 'sub-')]
    datasink.inputs.substitutions = substitutions

    # Initiation of the 1st-level analysis workflow
    l1analysis = Workflow(name='l1analysis')
    l1analysis.base_dir = opj(experiment_dir, working_dir)

    # Connect up the 1st-level analysis components
    l1analysis.connect([
        (infosource, selectfiles, [('subject_id', 'subject_id')]),
        (infosource, getsubjectinfo, [('subject_id', 'subject_id')]),
        (getsubjectinfo, modelspec, [('subject_info', 'subject_info')]),
        (infosource, level1conest, [('contrasts', 'contrasts')]),
        (selectfiles, modelspec, [('func', 'functional_runs')]),
        (modelspec, level1design, [('session_info', 'session_info')]),
        (level1design, level1estimate, [('spm_mat_file', 'spm_mat_file')]),
        (level1estimate, level1conest, [('spm_mat_file', 'spm_mat_file'),
                                        ('beta_images', 'beta_images'),
                                        ('residual_image', 'residual_image')]),
        (level1conest, datasink, [
            ('spm_mat_file', '1stLevel.@spm_mat'),
            ('spmT_images', '1stLevel.@T'),
            ('con_images', '1stLevel.@con'),
            ('spmF_images', '1stLevel.@F'),
            ('ess_images', '1stLevel.@ess'),
        ]),
    ])
    return l1analysis
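A hedged usage sketch for first_level; paths, TR, and the subject list are placeholders, and subjectinfo_func is any function with the signature sketched under Example #8:

contrasts = [['CueA > CueB', 'T', ['CueA', 'CueB'], [1, -1]]]
l1 = first_level(TR=2.0,
                 contrast_list=contrasts,
                 subject_list=['01', '02', '03'],
                 experiment_dir='/data/experiment',
                 output_dir='1stLevel_results',
                 subjectinfo_func=subjectinfo)
l1.run('MultiProc', plugin_args={'n_procs': 4})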
Example #10
def second_level(contrast_list,
                 experiment_dir,
                 first_level_dir,
                 output_dir,
                 mask_path='/data/group_mask.nii.gz',
                 working_dir='workingdir'):
    """define second level model"""
    # Gunzip - unzip the mask image
    gunzip = Node(Gunzip(in_file=mask_path), name="gunzip")

    # OneSampleTTestDesign - creates one sample T-Test Design
    onesamplettestdes = Node(OneSampleTTestDesign(), name="onesampttestdes")

    # EstimateModel - estimates the model
    level2estimate = Node(EstimateModel(estimation_method={'Classical': 1}),
                          name="level2estimate")

    # EstimateContrast - estimates group contrast
    level2conestimate = Node(EstimateContrast(group_contrast=True),
                             name="level2conestimate")
    cont1 = ['Group', 'T', ['mean'], [1]]
    level2conestimate.inputs.contrasts = [cont1]

    # Threshold - thresholds contrasts
    level2thresh = Node(
        Threshold(
            contrast_index=1,
            use_topo_fdr=True,
            # use_fwe_correction=True,
            use_fwe_correction=False,
            extent_threshold=0,
            height_threshold=0.001,
            # height_threshold=0.05,
            height_threshold_type='p-value',
            extent_fdr_p_threshold=0.05),
        name="level2thresh")

    # Infosource - a function free node to iterate over the list of subject names
    infosource = Node(IdentityInterface(fields=['contrast_id']),
                      name="infosource")
    infosource.iterables = [('contrast_id', contrast_list)]

    # SelectFiles - to grab the data (alternative to DataGrabber)
    firstlev_dir = opj(experiment_dir, first_level_dir, '1stLevel')
    templates = {'cons': opj(firstlev_dir, 'sub-*', '{contrast_id}.nii')}
    selectfiles = Node(SelectFiles(templates,
                                   base_directory=experiment_dir,
                                   sort_filelist=True),
                       name="selectfiles")

    # Datasink - creates output folder for important outputs
    datasink = Node(DataSink(base_directory=experiment_dir,
                             container=output_dir),
                    name="datasink")

    # Use the following DataSink output substitutions
    substitutions = [('_contrast_id_', '')]
    datasink.inputs.substitutions = substitutions

    # Initiation of the 2nd-level analysis workflow
    l2analysis = Workflow(name='l2analysis')
    l2analysis.base_dir = opj(experiment_dir, working_dir)

    # Connect up the 2nd-level analysis components
    l2analysis.connect([
        (infosource, selectfiles, [('contrast_id', 'contrast_id')]),
        (selectfiles, onesamplettestdes, [('cons', 'in_files')]),
        (gunzip, onesamplettestdes, [('out_file', 'explicit_mask_file')]),
        (onesamplettestdes, level2estimate, [('spm_mat_file', 'spm_mat_file')]),
        (level2estimate, level2conestimate, [('spm_mat_file', 'spm_mat_file'),
                                             ('beta_images', 'beta_images'),
                                             ('residual_image', 'residual_image')]),
        (level2conestimate, level2thresh, [
            ('spm_mat_file', 'spm_mat_file'),
            ('spmT_images', 'stat_image'),
        ]),
        (level2conestimate, datasink, [('spm_mat_file', '2ndLevel.@spm_mat'),
                                       ('spmT_images', '2ndLevel.@T'),
                                       ('con_images', '2ndLevel.@con')]),
        (level2thresh, datasink, [('thresholded_map', '2ndLevel.@threshold')]),
    ])
    return l2analysis
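A matching usage sketch for second_level; the contrast ids must correspond to first-level contrast image names, and all paths are placeholders:

l2 = second_level(contrast_list=['con_0001', 'con_0002'],
                  experiment_dir='/data/experiment',
                  first_level_dir='1stLevel_results',
                  output_dir='2ndLevel_results',
                  mask_path='/data/group_mask.nii.gz')
l2.run('MultiProc', plugin_args={'n_procs': 2})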
Example #11
def prepare_oneT():
    outdirs = [
        '/home/peter/Desktop/prepare/rest/output/sec_level/one_sample/alff_analysis_noglob/',
        '/home/peter/Desktop/prepare/rest/output/sec_level/one_sample/falff_analysis_noglob/'
    ]

    for n, outdir in enumerate(outdirs):

        stroke_hdir = '/home/peter/Desktop/prepare/rest/output/'
        all_stroke_files = glob.glob(stroke_hdir + '/*/f_alff/' + maps[n])
        stroke_files = [x for x in all_stroke_files if '12months' not in x]
        stroke_files.sort()

        #Convert to DataFrame, cull subs that aren't in preprocessed data
        stroke_files = pd.DataFrame(stroke_files, columns=['files'])
        stroke_files['initials'] = os.path.split(
            os.path.split(stroke_files['files'][n])[0])[0][-2:]
        cull_list = stroke_files['initials'].isin(df['initials'])
        df_cull = df[cull_list]
        df_cull.rename(columns={'Unnamed: 0': 'old_idx'}, inplace=True)
        #Specially remove the second HK (15-28) who refused to die...
        #df_cull = df_cull[df_cull['old_idx'] != 194]
        #df_cull.reset_index(inplace = True, drop = True)
        try:
            # Compare positionally (.values) to avoid pandas index-alignment issues
            assert np.all(df_cull['initials'].values == stroke_files['initials'].values)
            stroke_files['madrs_3m'] = df_cull['MADRS_score_3mth'].values
        except (AssertionError, ValueError):
            print('Data are not of identical length. Error in script.')

        cov = {
            'vector': stroke_files['madrs_3m'].tolist(),
            'name': 'madrs_3m',
            'interaction': 1,
            'centering': 1
        }

        ttest = Node(OneSampleTTestDesign(), name='OneSampleT')
        ttest.inputs.in_files = stroke_files['files'].tolist()
        ttest.inputs.covariates = cov
        ttest.inputs.explicit_mask_file = mask_file

        modelEst = Node(EstimateModel(), name='EstimateModel')
        modelEst.inputs.estimation_method = {'Classical': 1}

        conEst = Node(EstimateContrast(), name='EstimateContrasts')

        con_1 = ('Stroke', 'T', ['mean', 'madrs_3m'], [1.0, 0.0])
        con_2 = ('Covariate', 'T', ['mean', 'madrs_3m'], [0.0, 1.0])

        contrasts = [con_1, con_2]

        conEst.inputs.contrasts = contrasts
        conEst.inputs.group_contrast = True

        l2analysis = Workflow(name='l2analysis')
        l2analysis.base_dir = outdir

        l2analysis.connect([
            (ttest, modelEst, [('spm_mat_file', 'spm_mat_file')]),
            (modelEst, conEst, [('spm_mat_file', 'spm_mat_file'),
                                ('beta_images', 'beta_images'),
                                ('residual_image', 'residual_image')]),
        ])

        l2analysis.write_graph(graph2use='colored')
        l2analysis.run('MultiProc', plugin_args={'n_procs': 1})
Example #12
def prepare_indT_madrs_int(stand):
    outdirs = [
        '/home/peter/Desktop/prepare/rest/output/sec_level/two_sample_madrs_interaction/alff_analysis_noglob/',
        '/home/peter/Desktop/prepare/rest/output/sec_level/two_sample_madrs_interaction/falff_analysis_noglob/',
        '/home/peter/Desktop/prepare/rest/output/sec_level/two_sample_madrs_interaction/falff_slow_5_analysis_noglob/',
        '/home/peter/Desktop/prepare/rest/output/sec_level/two_sample_madrs_interaction/falff_slow_4_analysis_noglob/'
    ]

    #outdirs = ['/home/peter/Desktop/prepare/rest/output/sec_level/two_sample_madrs_interaction/alff_analysis/', '/home/peter/Desktop/prepare/rest/output/sec_level/two_sample_madrs_interaction/falff_analysis/', '/home/peter/Desktop/prepare/rest/output/sec_level/two_sample_madrs_interaction/falff_slow_5_analysis/', '/home/peter/Desktop/prepare/rest/output/sec_level/two_sample_madrs_interaction/falff_slow_4_analysis/']

    for n, outdir in enumerate(outdirs):

        stroke_hdir = '/home/peter/Desktop/prepare/rest/output/'
        all_stroke_files = glob.glob(stroke_hdir + '/*/f_alff/' + maps[n])
        stroke_files = [x for x in all_stroke_files if '12months' not in x]
        stroke_files.sort()

        #Convert to DataFrame, cull subs that aren't in preprocessed data
        stroke_files = pd.DataFrame(stroke_files, columns=['files'])
        stroke_files['initials'] = os.path.split(
            os.path.split(stroke_files['files'][n])[0])[0][-2:]
        cull_list = stroke_files['initials'].isin(df['initials'])
        df_cull = df[cull_list]
        df_cull.rename(columns={'Unnamed: 0': 'old_idx'}, inplace=True)
        #Specially remove the second HK (15-28) who refused to die...
        #df_cull = df_cull[df_cull['old_idx'] != 194]
        #df_cull.reset_index(inplace = True, drop = True)
        try:
            # Compare positionally (.values) to avoid pandas index-alignment issues
            assert np.all(df_cull['initials'].values == stroke_files['initials'].values)
            stroke_files['madrs_3m'] = df_cull['MADRS_score_3mth'].values
        except (AssertionError, ValueError):
            print('Data are not of identical length. Error in script.')

    #ADD LOW / HIGH GROUPS TO CULLED DF

        stroke_files['group'] = np.zeros(len(stroke_files))

        stroke_files.loc[stroke_files['madrs_3m'] <= 8, 'group'] = 1
        stroke_files.loc[stroke_files['madrs_3m'] > 8, 'group'] = 2
        stroke_files.to_csv(
            '/home/peter/Dropbox/post_doc/florey_leeanne/study_scripts/prepare/prepare_files_madrs_grouped.csv'
        )

        #SPLIT GROUPS & MAKE COVARIATE DEFINITIONS
        stroke_low = stroke_files[stroke_files['madrs_3m'] <= 8]
        stroke_high = stroke_files[stroke_files['madrs_3m'] > 8]

        #CALC MADRS STANDARDIZATION (0 - 1)
        lmin = stroke_low.madrs_3m.min()
        lmax = stroke_low.madrs_3m.max()
        stroke_low['madrs_3m_stand'] = (stroke_low.madrs_3m - lmin) / (lmax -
                                                                       lmin)

        hmin = stroke_high.madrs_3m.min()
        hmax = stroke_high.madrs_3m.max()
        stroke_high['madrs_3m_stand'] = (stroke_high.madrs_3m - hmin) / (hmax -
                                                                         hmin)

        if stand == 1:
            outdir = outdir[:-1] + '_stand'
            cov1 = np.zeros(len(stroke_files))
            cov1[:len(stroke_low)] = stroke_low['madrs_3m_stand']
            cov1 = {
                'vector': cov1.tolist(),
                'name': 'low_madrs',
                'interaction': 1,
                'centering': 5
            }

            cov2 = np.zeros(len(stroke_files))
            cov2[len(stroke_low):] = stroke_high['madrs_3m_stand']
            cov2 = {
                'vector': cov2.tolist(),
                'name': 'high_madrs',
                'interaction': 1,
                'centering': 5
            }

        elif stand == 0:
            cov1 = np.zeros(len(stroke_files)).astype(int)
            cov1[:len(stroke_low)] = stroke_low['madrs_3m'].astype(int)
            cov1 = {
                'vector': cov1.tolist(),
                'name': 'low_madrs',
                'interaction': 1,
                'centering': 5
            }

            cov2 = np.zeros(len(stroke_files)).astype(int)
            cov2[len(stroke_low):] = stroke_high['madrs_3m'].astype(int)
            cov2 = {
                'vector': cov2.tolist(),
                'name': 'high_madrs',
                'interaction': 1,
                'centering': 5
            }

        else:
            print(
                'Invalid value %i. Please enter either 0 for non-standardised or 1 for standardised'
                % (stand), )

    #INITIALISE T-TEST OBJECT
        ttest = Node(TwoSampleTTestDesign(), name='TwoSampleT')
        ttest.inputs.group1_files = stroke_low['files'].tolist()
        ttest.inputs.group2_files = stroke_high['files'].tolist()
        ttest.inputs.covariates = [cov1, cov2]
        #ttest.inputs.explicit_mask_file = mask_file

        #Estimate model (aka betas)
        modelEst = Node(EstimateModel(), name='EstimateModel')
        modelEst.inputs.estimation_method = {'Classical': 1}

        #Estimate contrasts
        conEst = Node(EstimateContrast(), name='EstimateContrasts')

        names = ['Group_{1}', 'Group_{2}', 'low_madrs', 'high_madrs']
        con_1 = ('all', 'T', names, [1.0, 1.0, 0.0, 0.0])
        con_2 = ('low', 'T', names, [1.0, 0.0, 0.0, 0.0])
        con_3 = ('high', 'T', names, [0.0, 1.0, 0.0, 0.0])
        con_4 = ('low GT high', 'T', names, [1.0, -1.0, 0.0, 0.0])
        con_5 = ('high GT low', 'T', names, [-1.0, 1.0, 0.0, 0.0])
        con_6 = ('low vs. high', 'T', names, [0.0, 0.0, 1.0, -1.0])
        con_7 = ('high vs. low', 'T', names, [0.0, 0.0, -1.0, 1.0])

        contrasts = [con_1, con_2, con_3, con_4, con_5, con_6, con_7]

        conEst.inputs.contrasts = contrasts
        conEst.inputs.group_contrast = True

        #RUN
        l2analysis = Workflow(name='l2analysis')
        l2analysis.base_dir = outdir

        l2analysis.connect([
            (ttest, modelEst, [('spm_mat_file', 'spm_mat_file')]),
            (modelEst, conEst, [('spm_mat_file', 'spm_mat_file'),
                                ('beta_images', 'beta_images'),
                                ('residual_image', 'residual_image')]),
        ])

        l2analysis.write_graph(graph2use='colored')
        l2analysis.run('MultiProc', plugin_args={'n_procs': 1})
Example #13
def prepare_indT_madrs():
    outdirs = [
        '/home/peter/Desktop/prepare/rest/output/sec_level/two_sample_madrs_main/alff_analysis_noglob/',
        '/home/peter/Desktop/prepare/rest/output/sec_level/two_sample_madrs_main/falff_analysis_noglob/',
        '/home/peter/Desktop/prepare/rest/output/sec_level/two_sample_madrs_main/falff_slow_5_analysis_noglob/',
        '/home/peter/Desktop/prepare/rest/output/sec_level/two_sample_madrs_main/falff_slow_4_analysis_noglob/'
    ]

    for n, outdir in enumerate(outdirs):

        stroke_hdir = '/home/peter/Desktop/prepare/rest/output/'
        all_stroke_files = glob.glob(stroke_hdir + '/*/f_alff/' + maps[n])
        stroke_files = [x for x in all_stroke_files if '12months' not in x]
        stroke_files.sort()

        #Convert to DataFrame, cull subs that aren't in preprocessed data
        stroke_files = pd.DataFrame(stroke_files, columns=['files'])
        stroke_files['initials'] = os.path.split(
            os.path.split(stroke_files['files'][n])[0])[0][-2:]
        cull_list = stroke_files['initials'].isin(df['initials'])
        df_cull = df[cull_list]
        df_cull.rename(columns={'Unnamed: 0': 'old_idx'}, inplace=True)
        #Specially remove the second HK (15-28) who refused to die...
        #df_cull = df_cull[df_cull['old_idx'] != 194]
        #df_cull.reset_index(inplace = True, drop = True)
        try:
            # Compare positionally (.values) to avoid pandas index-alignment issues
            assert np.all(df_cull['initials'].values == stroke_files['initials'].values)
            stroke_files['madrs_3m'] = df_cull['MADRS_score_3mth'].values
        except (AssertionError, ValueError):
            print('Data are not of identical length. Error in script.')

    #SPLIT GROUPS & MAKE COVARIATE DEFINITIONS
        stroke_low = stroke_files[stroke_files['madrs_3m'] <= 8]
        stroke_high = stroke_files[stroke_files['madrs_3m'] > 8]

        #INITIALISE T-TEST OBJECT
        ttest = Node(TwoSampleTTestDesign(), name='TwoSampleT')
        ttest.inputs.group1_files = stroke_low['files'].tolist()
        ttest.inputs.group2_files = stroke_high['files'].tolist()
        ttest.inputs.explicit_mask_file = mask_file

        #Estimate model (aka betas)
        modelEst = Node(EstimateModel(), name='EstimateModel')
        modelEst.inputs.estimation_method = {'Classical': 1}

        #Estimate contrasts
        conEst = Node(EstimateContrast(), name='EstimateContrasts')

        con_1 = ('mean_low', 'T', ['Group_{1}', 'Group_{2}'], [1.0, 0.0])
        con_2 = ('mean_high', 'T', ['Group_{1}', 'Group_{2}'], [0.0, 1.0])
        con_3 = ('Low>High', 'T', ['Group_{1}', 'Group_{2}'], [1.0, -1.0])
        con_4 = ('High>Low', 'T', ['Group_{1}', 'Group_{2}'], [-1.0, 1.0])

        contrasts = [con_1, con_2, con_3, con_4]

        conEst.inputs.contrasts = contrasts
        conEst.inputs.group_contrast = True

        #RUN
        l2analysis = Workflow(name='l2analysis')
        l2analysis.base_dir = outdir

        l2analysis.connect([
            (ttest, modelEst, [('spm_mat_file', 'spm_mat_file')]),
            (modelEst, conEst, [('spm_mat_file', 'spm_mat_file'),
                                ('beta_images', 'beta_images'),
                                ('residual_image', 'residual_image')]),
        ])

        l2analysis.write_graph(graph2use='colored')
        l2analysis.run('MultiProc', plugin_args={'n_procs': 1})