def make_sink(self):
     sink = Node(interface=DataSink(), name='sink')
     sink.inputs.base_directory = os.path.join(self.bids_root,
                                               'derivatives', 'NeuroMET')
     sink.inputs.substitutions = [
         ('/_uniden_prefix_derivatives..Siemens.._uniden_suffix_desc-UNIDEN/',
          '/anat/'),
         ('/_uniden_prefix_derivatives..Siemens.._uniden_suffix_desc-UNIDEN_MP2RAGE/',
          '/anat/'), ('/_uniden_prefix__uniden_suffix_T1w/', '/anat/'),
         ('/recon_all', '/anat/recon_all')
     ]
     sink.inputs.regexp_substitutions = [
         (r'_subject_id_2(?P<subid>[0-9][0-9][0-9])T(?P<sesid>[0-9])',
          r'sub-NeuroMET\g<subid>/ses-0\g<sesid>'),
         (r'c1(.*)_MP2RAGE_reoriented_maths_maths_bin.nii.gz',
          r'\1_ro_brain_bin.nii.gz'),
         (r'c1(.*)_MP2RAGE_reoriented.nii', r'\1_ro_GM_bin.nii'),
         (r'c2(.*)_MP2RAGE_reoriented.nii', r'\1_ro_WM_bin.nii'),
         (r'c3(.*)_MP2RAGE_reoriented.nii', r'\1_ro_CSF_bin.nii'),
         (r'msub-(.*)_MP2RAGE_reoriented.nii', r'sub-\1_ro_bfcorr.nii'),
         (r'c1(.*)_MP2RAGE_reoriented_maths_maths_bin.nii.gz',
          r'\1_ro_brain_bin.nii.gz'),
         (r'c1(.*)_T1w_reoriented.nii', r'\1_desc-UNI_ro_GM_bin.nii'),
         (r'c2(.*)_T1w_reoriented.nii', r'\1_desc-UNI_ro_WM_bin.nii'),
         (r'c3(.*)_T1w_reoriented.nii', r'\1_desc-UNI_ro_CSF_bin.nii'),
         (r'(.*)_T1w_reoriented.nii', r'\1_desc-UNI_ro.nii'),
         (r'msub-(.*)_T1w_reoriented.nii',
          r'sub-\1_desc-UNI_ro_bfcorr.nii'),
         (r'c1(.*)_T1w_reoriented_maths_maths_bin.nii.gz',
          r'\1_desc-UNI_ro_brain_bin.nii.gz'),
         (r'(.*)-UNIDEN_ro_bfcorr_masked_maths.nii.gz',
          r'anat/\1_UNIbrain_DENskull.nii.gz'),
     ]
     return sink
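DataSink applies the plain substitutions before the regexp_substitutions; below is a minimal standalone sketch of that rewriting order, run on a hypothetical output path rather than one produced by this pipeline.

import re

# Hypothetical DataSink-style path rewriting: plain substitutions first,
# then regular-expression substitutions.
path = '/derivatives/NeuroMET/_subject_id_2042T1/recon_all/stats'
for src, dst in [('/recon_all', '/anat/recon_all')]:
    path = path.replace(src, dst)
for pattern, repl in [(r'_subject_id_2(?P<subid>[0-9]{3})T(?P<sesid>[0-9])',
                       r'sub-NeuroMET\g<subid>/ses-0\g<sesid>')]:
    path = re.sub(pattern, repl, path)
print(path)  # /derivatives/NeuroMET/sub-NeuroMET042/ses-01/anat/recon_all/stats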
Example #2
def make_workflow():

    flairs = [os.path.abspath(i) for i in glob.glob(args.flair)]
    weights = [os.path.abspath(i) for i in glob.glob(args.weights)]
    weights_source = Node(interface=IdentityInterface(fields=['weights']),
                          name='weights_source')
    weights_source.inputs.weights = weights

    data_source = Node(IdentityInterface(fields=['flairs']),
                       name='data_source')
    data_source.iterables = ('flairs', flairs)

    sink = Node(interface=DataSink(), name='sink')
    sink.inputs.base_directory = wmh_dir
    sink.inputs.substitutions = [
        ('_flairs_', ''),
        ('_FLAIR.nii.gz/', '/'),
    ]
    sink.inputs.regexp_substitutions = [
        (r'\.\..*\.\.', ''),
    ]

    test_wf = ibbmTum_wf.get_test_wf(row_st=192, cols_st=192, thres_mask=10)

    wmh = Workflow(name='wmh', base_dir=wf_temp)

    wmh.connect(weights_source, 'weights', test_wf, 'inputspec.weights')
    wmh.connect(data_source, 'flairs', test_wf, 'inputspec.flair')
    wmh.connect(test_wf, 'outputspec.wmh_mask', sink, '@pred')

    return wmh
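A short sketch of how a workflow returned this way is typically executed; the plugin choice and process count are illustrative, not part of the example above.

wmh = make_workflow()
wmh.write_graph(graph2use='colored')  # optional: render the graph for inspection
wmh.run(plugin='MultiProc', plugin_args={'n_procs': 4})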
def make_sink(self):
     sink = Node(interface=DataSink(), name='sink')
     sink.inputs.base_directory = self.w_dir
     sink.inputs.substitutions = [
         ('_subject_id_', self.subject_prefix),
         ('DEN_mp2rage_orig_reoriented_masked_maths',
          'mUNIbrain_DENskull_SPMmasked'),
         ('_mp2rage_orig_reoriented_maths_maths_bin', '_brain_bin')
     ]
     sink.inputs.regexp_substitutions = [
         (r'c1NeuroMet(.*).UNI_brain_bin.nii.gz',
          r'NeuroMet\1.UNI_brain_bin.nii.gz'),
         (r'c1NeuroMet(.*).DEN_brain_bin.nii.gz',
          r'NeuroMet\1.DEN_brain_bin.nii.gz')
     ]
     return sink
Example #4
def define_model_results_workflow(info, subjects, qc=True):

    # TODO I am copying a lot from above ...

    # --- Workflow parameterization and data input

    # We just need two levels of iterables here: one subject-level and
    # one "flat" run-level iterable (i.e. all runs collapsing over
    # sessions). Unlike in the model fit workflow, we always want to process
    # all sessions.

    scan_info = info.scan_info
    experiment = info.experiment_name
    model = info.model_name

    iterables = generate_iterables(scan_info, experiment, subjects)
    subject_iterables, run_iterables = iterables

    subject_source = Node(IdentityInterface(["subject"]),
                          name="subject_source",
                          iterables=("subject", subject_iterables))

    run_source = Node(IdentityInterface(["subject", "run"]),
                      name="run_source",
                      itersource=("subject_source", "subject"),
                      iterables=("run", run_iterables))

    data_input = Node(
        ModelResultsInput(experiment=experiment,
                          model=model,
                          proc_dir=info.proc_dir), "data_input")

    # --- Run-level contrast estimation

    estimate_contrasts = Node(EstimateContrasts(info=info.trait_get()),
                              "estimate_contrasts")

    # --- Subject-level contrast estimation

    model_results = JoinNode(
        ModelResults(info=info.trait_get()),
        name="model_results",
        joinsource="run_source",
        joinfield=["contrast_files", "variance_files", "name_files"])

    # --- Data output

    save_info = Node(SaveInfo(info_dict=info.trait_get()), "save_info")

    run_output = Node(
        DataSink(base_directory=info.proc_dir, parameterization=False),
        "run_output")

    results_path = Node(
        ModelResultsPath(proc_dir=info.proc_dir,
                         experiment=experiment,
                         model=model), "results_path")

    subject_output = Node(
        DataSink(base_directory=info.proc_dir, parameterization=False),
        "subject_output")

    # === Assemble pipeline

    cache_base = op.join(info.cache_dir, experiment)
    workflow = Workflow(name="model_results", base_dir=cache_base)

    # Connect processing nodes

    processing_edges = [
        (subject_source, run_source, [("subject", "subject")]),
        (subject_source, data_input, [("subject", "subject")]),
        (run_source, data_input, [("run", "run_tuple")]),
        (data_input, estimate_contrasts, [("mask_file", "mask_file"),
                                          ("beta_file", "beta_file"),
                                          ("error_file", "error_file"),
                                          ("ols_file", "ols_file"),
                                          ("design_file", "design_file")]),
        (data_input, model_results, [("anat_file", "anat_file")]),
        (estimate_contrasts, model_results,
         [("contrast_file", "contrast_files"),
          ("variance_file", "variance_files"), ("name_file", "name_files")]),
        (run_source, save_info, [("run", "parameterization")]),
        (save_info, run_output, [("info_file", "qc.@info_json")]),
        (data_input, run_output, [("output_path", "container")]),
        (estimate_contrasts, run_output, [("contrast_file", "@contrast"),
                                          ("variance_file", "@variance"),
                                          ("tstat_file", "@tstat"),
                                          ("name_file", "@names")]),
        (subject_source, results_path, [("subject", "subject")]),
        (results_path, subject_output, [("output_path", "container")]),
        (model_results, subject_output, [("result_directories", "@results")]),
    ]
    workflow.connect(processing_edges)

    return workflow
Example #5
def define_model_fit_workflow(info, subjects, sessions, qc=True):

    # --- Workflow parameterization and data input

    # We just need two levels of iterables here: one subject-level and
    # one "flat" run-level iterable (i.e. all runs collapsing over
    # sessions). But we want to be able to specify sessions to process.

    scan_info = info.scan_info
    experiment = info.experiment_name
    model = info.model_name

    iterables = generate_iterables(scan_info, experiment, subjects, sessions)
    subject_iterables, run_iterables = iterables

    subject_source = Node(IdentityInterface(["subject"]),
                          name="subject_source",
                          iterables=("subject", subject_iterables))

    run_source = Node(IdentityInterface(["subject", "run"]),
                      name="run_source",
                      itersource=("subject_source", "subject"),
                      iterables=("run", run_iterables))

    data_input = Node(
        ModelFitInput(experiment=experiment,
                      model=model,
                      proc_dir=info.proc_dir), "data_input")

    # --- Data filtering and model fitting

    fit_model = Node(ModelFit(data_dir=info.data_dir, info=info.trait_get()),
                     "fit_model")

    # --- Data output

    save_info = Node(SaveInfo(info_dict=info.trait_get()), "save_info")

    data_output = Node(
        DataSink(base_directory=info.proc_dir, parameterization=False),
        "data_output")

    # === Assemble pipeline

    cache_base = op.join(info.cache_dir, experiment)
    workflow = Workflow(name="model_fit", base_dir=cache_base)

    # Connect processing nodes

    processing_edges = [
        (subject_source, run_source, [("subject", "subject")]),
        (subject_source, data_input, [("subject", "subject")]),
        (run_source, data_input, [("run", "run_tuple")]),
        (data_input, fit_model, [("subject", "subject"),
                                 ("session", "session"), ("run", "run"),
                                 ("seg_file", "seg_file"),
                                 ("surf_file", "surf_file"),
                                 ("mask_file", "mask_file"),
                                 ("ts_file", "ts_file"),
                                 ("noise_file", "noise_file"),
                                 ("mc_file", "mc_file")]),
        (data_input, data_output, [("output_path", "container")]),
        (fit_model, data_output,
         [("mask_file", "@mask"), ("beta_file", "@beta"),
          ("error_file", "@error"), ("ols_file", "@ols"),
          ("resid_file", "@resid"), ("design_file", "@design")]),
    ]
    workflow.connect(processing_edges)

    qc_edges = [
        (run_source, save_info, [("run", "parameterization")]),
        (save_info, data_output, [("info_file", "qc.@info_json")]),
        (fit_model, data_output, [("design_plot", "qc.@design_plot"),
                                  ("resid_plot", "qc.@resid_plot"),
                                  ("error_plot", "qc.@error_plot")]),
    ]
    if qc:
        workflow.connect(qc_edges)

    return workflow
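A minimal sketch of invoking and running this fit workflow; the info object and subject list stand in for whatever lyman-style project configuration the surrounding script supplies.

# Illustrative invocation (placeholder info object and subjects)
wf = define_model_fit_workflow(info, subjects=["subj01"], sessions=None)
wf.run(plugin="MultiProc", plugin_args={"n_procs": 8})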
Example #6
def create_workflow(files,
                    subject_id,
                    n_vol=0,
                    despike=True,
                    TR=None,
                    slice_times=None,
                    slice_thickness=None,
                    fieldmap_images=[],
                    norm_threshold=1,
                    num_components=6,
                    vol_fwhm=None,
                    surf_fwhm=None,
                    lowpass_freq=-1,
                    highpass_freq=-1,
                    sink_directory=os.getcwd(),
                    FM_TEdiff=2.46,
                    FM_sigma=2,
                    FM_echo_spacing=.7,
                    target_subject=['fsaverage3', 'fsaverage4'],
                    name='resting'):

    wf = Workflow(name=name)

    # Skip starting volumes
    remove_vol = MapNode(fsl.ExtractROI(t_min=n_vol, t_size=-1),
                         iterfield=['in_file'],
                         name="remove_volumes")
    remove_vol.inputs.in_file = files

    # Run AFNI's Despike. This is always run; whether its output is fed to
    # realign depends on the input configuration below.
    despiker = MapNode(afni.Despike(outputtype='NIFTI_GZ'),
                       iterfield=['in_file'],
                       name='despike')
    #despiker.plugin_args = {'qsub_args': '-l nodes=1:ppn='}

    wf.connect(remove_vol, 'roi_file', despiker, 'in_file')

    # Run Nipy joint slice timing and realignment algorithm
    realign = Node(nipy.SpaceTimeRealigner(), name='realign')
    realign.inputs.tr = TR
    realign.inputs.slice_times = slice_times
    realign.inputs.slice_info = 2

    if despike:
        wf.connect(despiker, 'out_file', realign, 'in_file')
    else:
        wf.connect(remove_vol, 'roi_file', realign, 'in_file')

    # Compute TSNR on realigned data, regressing out polynomials up to order 2
    tsnr = MapNode(TSNR(regress_poly=2), iterfield=['in_file'], name='tsnr')
    wf.connect(realign, 'out_file', tsnr, 'in_file')

    # Compute the median image across runs
    calc_median = Node(Function(input_names=['in_files'],
                                output_names=['median_file'],
                                function=median,
                                imports=imports),
                       name='median')
    wf.connect(tsnr, 'detrended_file', calc_median, 'in_files')

    # Coregister the median to the surface
    register = Node(freesurfer.BBRegister(), name='bbregister')
    register.inputs.subject_id = subject_id
    register.inputs.init = 'fsl'
    register.inputs.contrast_type = 't2'
    register.inputs.out_fsl_file = True
    register.inputs.epi_mask = True

    # Compute fieldmaps and unwarp using them
    if fieldmap_images:
        fieldmap = Node(interface=EPIDeWarp(), name='fieldmap_unwarp')
        fieldmap.inputs.tediff = FM_TEdiff
        fieldmap.inputs.esp = FM_echo_spacing
        fieldmap.inputs.sigma = FM_sigma
        fieldmap.inputs.mag_file = fieldmap_images[0]
        fieldmap.inputs.dph_file = fieldmap_images[1]
        wf.connect(calc_median, 'median_file', fieldmap, 'exf_file')

        dewarper = MapNode(interface=fsl.FUGUE(),
                           iterfield=['in_file'],
                           name='dewarper')
        wf.connect(tsnr, 'detrended_file', dewarper, 'in_file')
        wf.connect(fieldmap, 'exf_mask', dewarper, 'mask_file')
        wf.connect(fieldmap, 'vsm_file', dewarper, 'shift_in_file')
        wf.connect(fieldmap, 'exfdw', register, 'source_file')
    else:
        wf.connect(calc_median, 'median_file', register, 'source_file')

    # Get the subject's freesurfer source directory
    fssource = Node(FreeSurferSource(), name='fssource')
    fssource.inputs.subject_id = subject_id
    fssource.inputs.subjects_dir = os.environ['SUBJECTS_DIR']

    # Extract wm+csf, brain masks by eroding freesurfer labels and then
    # transform the masks into the space of the median
    wmcsf = Node(freesurfer.Binarize(), name='wmcsfmask')
    mask = wmcsf.clone('anatmask')
    wmcsftransform = Node(freesurfer.ApplyVolTransform(inverse=True,
                                                       interp='nearest'),
                          name='wmcsftransform')
    wmcsftransform.inputs.subjects_dir = os.environ['SUBJECTS_DIR']
    wmcsf.inputs.wm_ven_csf = True
    wmcsf.inputs.match = [4, 5, 14, 15, 24, 31, 43, 44, 63]
    wmcsf.inputs.binary_file = 'wmcsf.nii.gz'
    wmcsf.inputs.erode = int(np.ceil(slice_thickness))
    wf.connect(fssource, ('aparc_aseg', get_aparc_aseg), wmcsf, 'in_file')
    if fieldmap_images:
        wf.connect(fieldmap, 'exf_mask', wmcsftransform, 'source_file')
    else:
        wf.connect(calc_median, 'median_file', wmcsftransform, 'source_file')
    wf.connect(register, 'out_reg_file', wmcsftransform, 'reg_file')
    wf.connect(wmcsf, 'binary_file', wmcsftransform, 'target_file')

    mask.inputs.binary_file = 'mask.nii.gz'
    mask.inputs.dilate = int(np.ceil(slice_thickness)) + 1
    mask.inputs.erode = int(np.ceil(slice_thickness))
    mask.inputs.min = 0.5
    wf.connect(fssource, ('aparc_aseg', get_aparc_aseg), mask, 'in_file')
    masktransform = wmcsftransform.clone("masktransform")
    if fieldmap_images:
        wf.connect(fieldmap, 'exf_mask', masktransform, 'source_file')
    else:
        wf.connect(calc_median, 'median_file', masktransform, 'source_file')
    wf.connect(register, 'out_reg_file', masktransform, 'reg_file')
    wf.connect(mask, 'binary_file', masktransform, 'target_file')

    # Compute Art outliers
    art = Node(interface=ArtifactDetect(use_differences=[True, False],
                                        use_norm=True,
                                        norm_threshold=norm_threshold,
                                        zintensity_threshold=3,
                                        parameter_source='NiPy',
                                        bound_by_brainmask=True,
                                        save_plot=False,
                                        mask_type='file'),
               name="art")
    if fieldmap_images:
        wf.connect(dewarper, 'unwarped_file', art, 'realigned_files')
    else:
        wf.connect(tsnr, 'detrended_file', art, 'realigned_files')
    wf.connect(realign, 'par_file', art, 'realignment_parameters')
    wf.connect(masktransform, 'transformed_file', art, 'mask_file')

    # Compute motion regressors
    motreg = Node(Function(
        input_names=['motion_params', 'order', 'derivatives'],
        output_names=['out_files'],
        function=motion_regressors,
        imports=imports),
                  name='getmotionregress')
    wf.connect(realign, 'par_file', motreg, 'motion_params')

    # Create a filter to remove motion and art confounds
    createfilter1 = Node(Function(
        input_names=['motion_params', 'comp_norm', 'outliers'],
        output_names=['out_files'],
        function=build_filter1,
        imports=imports),
                         name='makemotionbasedfilter')
    wf.connect(motreg, 'out_files', createfilter1, 'motion_params')
    wf.connect(art, 'norm_files', createfilter1, 'comp_norm')
    wf.connect(art, 'outlier_files', createfilter1, 'outliers')

    # Filter the motion and art confounds
    filter1 = MapNode(fsl.GLM(out_res_name='timeseries.nii.gz', demean=True),
                      iterfield=['in_file', 'design'],
                      name='filtermotion')
    if fieldmap_images:
        wf.connect(dewarper, 'unwarped_file', filter1, 'in_file')
    else:
        wf.connect(tsnr, 'detrended_file', filter1, 'in_file')
    wf.connect(createfilter1, 'out_files', filter1, 'design')
    wf.connect(masktransform, 'transformed_file', filter1, 'mask')

    # Create a filter to remove noise components based on white matter and CSF
    createfilter2 = MapNode(Function(
        input_names=['realigned_file', 'mask_file', 'num_components'],
        output_names=['out_files'],
        function=extract_noise_components,
        imports=imports),
                            iterfield=['realigned_file'],
                            name='makecompcorrfilter')
    createfilter2.inputs.num_components = num_components
    wf.connect(filter1, 'out_res', createfilter2, 'realigned_file')
    wf.connect(masktransform, 'transformed_file', createfilter2, 'mask_file')

    # Filter noise components
    filter2 = MapNode(fsl.GLM(out_res_name='timeseries_cleaned.nii.gz',
                              demean=True),
                      iterfield=['in_file', 'design'],
                      name='filtercompcorr')
    wf.connect(filter1, 'out_res', filter2, 'in_file')
    wf.connect(createfilter2, 'out_files', filter2, 'design')
    wf.connect(masktransform, 'transformed_file', filter2, 'mask')

    # Smoothing using surface and volume smoothing
    smooth = MapNode(freesurfer.Smooth(), iterfield=['in_file'], name='smooth')
    smooth.inputs.proj_frac_avg = (0.1, 0.9, 0.1)
    if surf_fwhm is None:
        surf_fwhm = 5 * slice_thickness
    smooth.inputs.surface_fwhm = surf_fwhm
    if vol_fwhm is None:
        vol_fwhm = 2 * slice_thickness
    smooth.inputs.vol_fwhm = vol_fwhm
    wf.connect(filter2, 'out_res', smooth, 'in_file')
    wf.connect(register, 'out_reg_file', smooth, 'reg_file')

    # Bandpass filter the data
    bandpass = MapNode(fsl.TemporalFilter(),
                       iterfield=['in_file'],
                       name='bandpassfilter')
    if highpass_freq < 0:
        bandpass.inputs.highpass_sigma = -1
    else:
        bandpass.inputs.highpass_sigma = 1. / (2 * TR * highpass_freq)
    if lowpass_freq < 0:
        bandpass.inputs.lowpass_sigma = -1
    else:
        bandpass.inputs.lowpass_sigma = 1. / (2 * TR * lowpass_freq)
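    # Worked example of the Hz-to-sigma conversion above (illustrative values):
    # with TR = 2.0 s and highpass_freq = 0.01 Hz,
    #   highpass_sigma = 1 / (2 * 2.0 * 0.01) = 25 volumes,
    # since fsl.TemporalFilter expects cutoffs as sigmas in volumes, not in Hz.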
    wf.connect(smooth, 'smoothed_file', bandpass, 'in_file')

    # Convert aparc to subject functional space
    aparctransform = wmcsftransform.clone("aparctransform")
    if fieldmap_images:
        wf.connect(fieldmap, 'exf_mask', aparctransform, 'source_file')
    else:
        wf.connect(calc_median, 'median_file', aparctransform, 'source_file')
    wf.connect(register, 'out_reg_file', aparctransform, 'reg_file')
    wf.connect(fssource, ('aparc_aseg', get_aparc_aseg), aparctransform,
               'target_file')

    # Sample the average time series in aparc ROIs
    sampleaparc = MapNode(freesurfer.SegStats(avgwf_txt_file=True,
                                              default_color_table=True),
                          iterfield=['in_file'],
                          name='aparc_ts')
    sampleaparc.inputs.segment_id = ([8] + list(range(10, 14)) +
                                     [17, 18, 26, 47] + list(range(49, 55)) +
                                     [58] + list(range(1001, 1036)) +
                                     list(range(2001, 2036)))

    wf.connect(aparctransform, 'transformed_file', sampleaparc,
               'segmentation_file')
    wf.connect(bandpass, 'out_file', sampleaparc, 'in_file')

    # Sample the time series onto the surface of the target subject. Sampling
    # is performed separately for the left and right hemispheres.
    target = Node(IdentityInterface(fields=['target_subject']), name='target')
    target.iterables = ('target_subject', filename_to_list(target_subject))

    samplerlh = MapNode(freesurfer.SampleToSurface(),
                        iterfield=['source_file'],
                        name='sampler_lh')
    samplerlh.inputs.sampling_method = "average"
    samplerlh.inputs.sampling_range = (0.1, 0.9, 0.1)
    samplerlh.inputs.sampling_units = "frac"
    samplerlh.inputs.interp_method = "trilinear"
    #samplerlh.inputs.cortex_mask = True
    samplerlh.inputs.out_type = 'niigz'
    samplerlh.inputs.subjects_dir = os.environ['SUBJECTS_DIR']

    samplerrh = samplerlh.clone('sampler_rh')

    samplerlh.inputs.hemi = 'lh'
    wf.connect(bandpass, 'out_file', samplerlh, 'source_file')
    wf.connect(register, 'out_reg_file', samplerlh, 'reg_file')
    wf.connect(target, 'target_subject', samplerlh, 'target_subject')

    samplerrh.set_input('hemi', 'rh')
    wf.connect(bandpass, 'out_file', samplerrh, 'source_file')
    wf.connect(register, 'out_reg_file', samplerrh, 'reg_file')
    wf.connect(target, 'target_subject', samplerrh, 'target_subject')

    # Combine left and right hemisphere to text file
    combiner = MapNode(Function(input_names=['left', 'right'],
                                output_names=['out_file'],
                                function=combine_hemi,
                                imports=imports),
                       iterfield=['left', 'right'],
                       name="combiner")
    wf.connect(samplerlh, 'out_file', combiner, 'left')
    wf.connect(samplerrh, 'out_file', combiner, 'right')

    # Compute registration between the subject's structural and MNI template
    # This is currently set to perform a very quick registration. However, the
    # registration can be made significantly more accurate for cortical
    # structures by increasing the number of iterations
    # All parameters are set using the example from:
    # https://github.com/stnava/ANTs/blob/master/Scripts/newAntsExample.sh
    reg = Node(ants.Registration(), name='antsRegister')
    reg.inputs.output_transform_prefix = "output_"
    reg.inputs.transforms = ['Translation', 'Rigid', 'Affine', 'SyN']
    reg.inputs.transform_parameters = [(0.1, ), (0.1, ), (0.1, ),
                                       (0.2, 3.0, 0.0)]
    # reg.inputs.number_of_iterations = ([[10000, 111110, 11110]]*3 +
    #                                    [[100, 50, 30]])
    reg.inputs.number_of_iterations = [[100, 100, 100]] * 3 + [[100, 20, 10]]
    reg.inputs.dimension = 3
    reg.inputs.write_composite_transform = True
    reg.inputs.collapse_output_transforms = False
    reg.inputs.metric = ['Mattes'] * 3 + [['Mattes', 'CC']]
    reg.inputs.metric_weight = [1] * 3 + [[0.5, 0.5]]
    reg.inputs.radius_or_number_of_bins = [32] * 3 + [[32, 4]]
    reg.inputs.sampling_strategy = ['Regular'] * 3 + [[None, None]]
    reg.inputs.sampling_percentage = [0.3] * 3 + [[None, None]]
    reg.inputs.convergence_threshold = [1.e-8] * 3 + [-0.01]
    reg.inputs.convergence_window_size = [20] * 3 + [5]
    reg.inputs.smoothing_sigmas = [[4, 2, 1]] * 3 + [[1, 0.5, 0]]
    reg.inputs.sigma_units = ['vox'] * 4
    reg.inputs.shrink_factors = [[6, 4, 2]] + [[3, 2, 1]] * 2 + [[4, 2, 1]]
    reg.inputs.use_estimate_learning_rate_once = [True] * 4
    reg.inputs.use_histogram_matching = [False] * 3 + [True]
    reg.inputs.output_warped_image = 'output_warped_image.nii.gz'
    reg.inputs.fixed_image = \
        os.path.abspath('OASIS-30_Atropos_template_in_MNI152_2mm.nii.gz')
    reg.inputs.num_threads = 4
    reg.plugin_args = {'qsub_args': '-l nodes=1:ppn=4'}

    # Convert T1.mgz to nifti for using with ANTS
    convert = Node(freesurfer.MRIConvert(out_type='niigz'), name='convert2nii')
    wf.connect(fssource, 'T1', convert, 'in_file')

    # Mask the T1.mgz file with the brain mask computed earlier
    maskT1 = Node(fsl.BinaryMaths(operation='mul'), name='maskT1')
    wf.connect(mask, 'binary_file', maskT1, 'operand_file')
    wf.connect(convert, 'out_file', maskT1, 'in_file')
    wf.connect(maskT1, 'out_file', reg, 'moving_image')

    # Convert the BBRegister transformation to ANTS ITK format
    convert2itk = MapNode(C3dAffineTool(),
                          iterfield=['transform_file', 'source_file'],
                          name='convert2itk')
    convert2itk.inputs.fsl2ras = True
    convert2itk.inputs.itk_transform = True
    wf.connect(register, 'out_fsl_file', convert2itk, 'transform_file')
    if fieldmap_images:
        wf.connect(fieldmap, 'exf_mask', convert2itk, 'source_file')
    else:
        wf.connect(calc_median, 'median_file', convert2itk, 'source_file')
    wf.connect(convert, 'out_file', convert2itk, 'reference_file')

    # Concatenate the affine and ants transforms into a list
    pickfirst = lambda x: x[0]
    merge = MapNode(Merge(2), iterfield=['in2'], name='mergexfm')
    wf.connect(convert2itk, 'itk_transform', merge, 'in2')
    wf.connect(reg, ('composite_transform', pickfirst), merge, 'in1')

    # Apply the combined transform to the time series file
    sample2mni = MapNode(ants.ApplyTransforms(),
                         iterfield=['input_image', 'transforms'],
                         name='sample2mni')
    sample2mni.inputs.input_image_type = 3
    sample2mni.inputs.interpolation = 'BSpline'
    sample2mni.inputs.invert_transform_flags = [False, False]
    sample2mni.inputs.reference_image = \
        os.path.abspath('OASIS-30_Atropos_template_in_MNI152_2mm.nii.gz')
    sample2mni.inputs.terminal_output = 'file'
    wf.connect(bandpass, 'out_file', sample2mni, 'input_image')
    wf.connect(merge, 'out', sample2mni, 'transforms')

    # Sample the time series file for each subcortical roi
    ts2txt = MapNode(Function(
        input_names=['timeseries_file', 'label_file', 'indices'],
        output_names=['out_file'],
        function=extract_subrois,
        imports=imports),
                     iterfield=['timeseries_file'],
                     name='getsubcortts')
    ts2txt.inputs.indices = ([8] + list(range(10, 14)) + [17, 18, 26, 47] +
                             list(range(49, 55)) + [58])
    ts2txt.inputs.label_file = \
        os.path.abspath(('OASIS-TRT-20_jointfusion_DKT31_CMA_labels_in_MNI152_'
                         '2mm.nii.gz'))
    wf.connect(sample2mni, 'output_image', ts2txt, 'timeseries_file')

    # Save the relevant data into an output directory
    datasink = Node(interface=DataSink(), name="datasink")
    datasink.inputs.base_directory = sink_directory
    datasink.inputs.container = subject_id
    datasink.inputs.substitutions = [('_target_subject_', '')]
    datasink.inputs.regexp_substitutions = (r'(/_.*(\d+/))', r'/run\2')
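    # For example, a parameterized folder such as '/_bandpassfilter0/' in a
    # sink path would be rewritten to '/run0/' by the pattern above
    # (illustrative folder name).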
    wf.connect(despiker, 'out_file', datasink, 'resting.qa.despike')
    wf.connect(realign, 'par_file', datasink, 'resting.qa.motion')
    wf.connect(tsnr, 'tsnr_file', datasink, 'resting.qa.tsnr')
    wf.connect(tsnr, 'mean_file', datasink, 'resting.qa.tsnr.@mean')
    wf.connect(tsnr, 'stddev_file', datasink, 'resting.qa.@tsnr_stddev')
    if fieldmap_images:
        wf.connect(fieldmap, 'exf_mask', datasink, 'resting.reference')
    else:
        wf.connect(calc_median, 'median_file', datasink, 'resting.reference')
    wf.connect(art, 'norm_files', datasink, 'resting.qa.art.@norm')
    wf.connect(art, 'intensity_files', datasink, 'resting.qa.art.@intensity')
    wf.connect(art, 'outlier_files', datasink, 'resting.qa.art.@outlier_files')
    wf.connect(mask, 'binary_file', datasink, 'resting.mask')
    wf.connect(masktransform, 'transformed_file', datasink,
               'resting.mask.@transformed_file')
    wf.connect(register, 'out_reg_file', datasink,
               'resting.registration.bbreg')
    wf.connect(reg, ('composite_transform', pickfirst), datasink,
               'resting.registration.ants')
    wf.connect(register, 'min_cost_file', datasink,
               'resting.qa.bbreg.@mincost')
    wf.connect(smooth, 'smoothed_file', datasink,
               'resting.timeseries.fullpass')
    wf.connect(bandpass, 'out_file', datasink, 'resting.timeseries.bandpassed')
    wf.connect(sample2mni, 'output_image', datasink, 'resting.timeseries.mni')
    wf.connect(createfilter1, 'out_files', datasink,
               'resting.regress.@regressors')
    wf.connect(createfilter2, 'out_files', datasink,
               'resting.regress.@compcorr')
    wf.connect(sampleaparc, 'summary_file', datasink,
               'resting.parcellations.aparc')
    wf.connect(sampleaparc, 'avgwf_txt_file', datasink,
               'resting.parcellations.aparc.@avgwf')
    wf.connect(ts2txt, 'out_file', datasink,
               'resting.parcellations.grayo.@subcortical')
    datasink2 = Node(interface=DataSink(), name="datasink2")
    datasink2.inputs.base_directory = sink_directory
    datasink2.inputs.container = subject_id
    datasink2.inputs.substitutions = [('_target_subject_', '')]
    datasink2.inputs.regexp_substitutions = (r'(/_.*(\d+/))', r'/run\2')
    wf.connect(combiner, 'out_file', datasink2,
               'resting.parcellations.grayo.@surface')
    return wf
Example #7
    return ([full_path, full_path_2])


merge_snr_T1_info_node = Node(Function(input_names=[
    'gm_m', 'gm_s', 'gm_l', 'wm_m', 'wm_s', 'wm_l', 'csf_m', 'csf_s', 'csf_l',
    'bg_m', 'bg_s', 'bg_l'
],
                                       output_names=['summary_snr', 'snr_cnr'],
                                       function=merge_snr_info),
                              name="Summarize_SNR")

###END SNR

##Part 4: DataSink:
#Datasink.
datasink_sub = Node(DataSink(), name="DataSink")
datasink_sub.inputs.base_directory = output_dir
datasink_sub.inputs.substitutions = substitutions_sink

###Workflow
midpro_flow = Workflow(name="MidMRI")
midpro_flow.base_dir = working_dir

#WF: Initialize
midpro_flow.connect(infosource_sub, 'subject_id', datagrab_sub, 'subject_id')

##WF: Skullstrip
midpro_flow.connect(datagrab_sub, 'c1', pre_merge, 'c1')
midpro_flow.connect(datagrab_sub, 'c2', pre_merge, 'c2')
midpro_flow.connect(datagrab_sub, 'c3', pre_merge, 'c3')
Example #8
data_dir = '/Users/vgonzenb/Python/nipy/fmri-rep/data/ds000171/'

grab_anat = Node(DataGrabber(infields=['subject_id'], outfields=['anat']),
                 name='grab_anat', nested=True)
grab_anat.inputs.base_directory = data_dir
grab_anat.inputs.template = '*'
grab_anat.inputs.sort_filelist = True
grab_anat.inputs.template_args = {'anat': [['subject_id']]}
grab_anat.inputs.field_template = {'anat': 'sub-%s/anat/s*T1w.nii.gz'}

# DataGrabber 2 - func

grab_func = Node(DataGrabber(infields=['subject_id', 'runs'],
                             outfields=['func']),
                 name='grab_func', nested=True)
grab_func.inputs.base_directory = data_dir
grab_func.inputs.template = '*'
grab_func.inputs.sort_filelist = True
grab_func.inputs.template_args = {'func': [['subject_id', 'runs']]}
grab_func.inputs.field_template = {'func': 'sub-%s/func/sub-*run-%d_bold.nii.gz'}
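# Illustrative note: DataGrabber fills the %-placeholders in field_template
# with the connected template_args inputs before globbing, e.g.
#   'sub-%s/func/sub-*run-%d_bold.nii.gz' % ('control01', 1)
#   -> 'sub-control01/func/sub-*run-1_bold.nii.gz'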

# DataSink

datasink = Node(DataSink(base_directory=os.getcwd()), name='datasink')

wf.connect([(info_subj, grab_anat, [('subject_id', 'subject_id')]),
            (info_subj, grab_func, [('subject_id', 'subject_id')]),
            (info_run, grab_func, [('run_id', 'runs')]),
            (info_subj, datasink, [('subject_id', 'container')]),
            (grab_anat, datasink, [('anat', 'anat')]),
            (grab_func, datasink, [('func', 'func')])])

# Plot workflow
wf.write_graph(dotfilename='try.dot', graph2use='exec', simple_form=False)
from IPython.display import Image
Image(filename="iter_subj_splitgrabber/try_detailed.png")

# Run it 
# wf.run()
Example #9
                   bids_root,
                   event_id=event_id,
                   events_data=events,
                   overwrite=True)


# setup workflow nodes

# Create SelectFiles node
templates = {'eeg_raw': 'sub-{subject_id}.bdf'}

selectfiles = Node(SelectFiles(templates, base_directory=source_data_path),
                   name='selectfiles')

# Create DataSink node
datasink = Node(DataSink(base_directory=data_dir, container=bids_root),
                name="datasink")

# Infosource - a function free node to iterate over the list of subject names
infosource = Node(IdentityInterface(fields=['subject_id', 'run_id']),
                  name="infosource")
infosource.iterables = [('subject_id', subject_list), ('run_id', run_list)]

# Create split run node
split_single_file = Node(Function(
    input_names=['source_data_file', 'source_data_path', 'rb_trig'],
    output_names=['run_files', 'run_ids'],
    function=split_runs),
                         name='split_single_file')
split_single_file.inputs.source_data_path = source_data_path
split_single_file.inputs.rb_trig = run_break_trigger
Example #10
def create_fsl_workflow(data_dir=None, subjects=None, name="fslwarp"):
    """Set up the anatomical normalzation workflow using FNIRT.

    Your anatomical data must have been processed in Freesurfer.
    Unlike most lyman workflows, the DataGrabber and DataSink
    nodes are hardwired within the returned workflow, as this
    tightly integrates with the Freesurfer subjects directory
    structure.

    Parameters
    ----------
    data_dir : path
        top level of data hierarchy/FS subjects directory
    subjects : list of strings
        list of subject IDs
    name : alphanumeric string, optional
        workflow name

    """
    if data_dir is None:
        data_dir = os.environ["SUBJECTS_DIR"]
    if subjects is None:
        subjects = []

    # Get target images
    target_brain = fsl.Info.standard_image("avg152T1_brain.nii.gz")
    target_head = fsl.Info.standard_image("avg152T1.nii.gz")
    hires_head = fsl.Info.standard_image("MNI152_T1_1mm.nii.gz")
    target_mask = fsl.Info.standard_image(
        "MNI152_T1_2mm_brain_mask_dil.nii.gz")
    fnirt_cfg = os.path.join(os.environ["FSLDIR"],
                             "etc/flirtsch/T1_2_MNI152_2mm.cnf")

    # Subject source node
    subjectsource = Node(IdentityInterface(fields=["subject_id"]),
                         iterables=("subject_id", subjects),
                         name="subjectsource")

    # Grab recon-all outputs
    head_image = "T1"
    templates = dict(aseg="{subject_id}/mri/aparc+aseg.mgz",
                     head="{subject_id}/mri/" + head_image + ".mgz")
    datasource = Node(SelectFiles(templates, base_directory=data_dir),
                      "datasource")

    # Convert images to nifti storage and float representation
    cvtaseg = Node(fs.MRIConvert(out_type="niigz"), "convertaseg")

    cvthead = Node(fs.MRIConvert(out_type="niigz", out_datatype="float"),
                   "converthead")

    # Turn the aparc+aseg into a brainmask
    makemask = Node(fs.Binarize(dilate=1, min=0.5), "makemask")

    # Extract the brain from the orig.mgz using the mask
    skullstrip = Node(fsl.ApplyMask(), "skullstrip")

    # FLIRT brain to MNI152_brain
    flirt = Node(fsl.FLIRT(reference=target_brain), "flirt")

    sw = [-180, 180]
    for dim in ["x", "y", "z"]:
        setattr(flirt.inputs, "searchr_%s" % dim, sw)

    # FNIRT head to MNI152
    fnirt = Node(
        fsl.FNIRT(ref_file=target_head,
                  refmask_file=target_mask,
                  config_file=fnirt_cfg,
                  fieldcoeff_file=True), "fnirt")

    # Warp and rename the images
    warpbrain = Node(
        fsl.ApplyWarp(ref_file=target_head,
                      interp="spline",
                      out_file="brain_warp.nii.gz"), "warpbrain")

    warpbrainhr = Node(
        fsl.ApplyWarp(ref_file=hires_head,
                      interp="spline",
                      out_file="brain_warp_hires.nii.gz"), "warpbrainhr")

    # Generate a png summarizing the registration
    warpreport = Node(WarpReport(), "warpreport")

    # Save relevant files to the data directory
    fnirt_subs = [(head_image + "_out_masked_flirt.mat", "affine.mat"),
                  (head_image + "_out_fieldwarp", "warpfield"),
                  (head_image + "_out_masked", "brain"),
                  (head_image + "_out", "T1")]
    datasink = Node(
        DataSink(base_directory=data_dir,
                 parameterization=False,
                 substitutions=fnirt_subs), "datasink")

    # Define and connect the workflow
    # -------------------------------

    normalize = Workflow(name=name)

    normalize.connect([
        (subjectsource, datasource, [("subject_id", "subject_id")]),
        (datasource, cvtaseg, [("aseg", "in_file")]),
        (datasource, cvthead, [("head", "in_file")]),
        (cvtaseg, makemask, [("out_file", "in_file")]),
        (cvthead, skullstrip, [("out_file", "in_file")]),
        (makemask, skullstrip, [("binary_file", "mask_file")]),
        (skullstrip, flirt, [("out_file", "in_file")]),
        (flirt, fnirt, [("out_matrix_file", "affine_file")]),
        (cvthead, fnirt, [("out_file", "in_file")]),
        (skullstrip, warpbrain, [("out_file", "in_file")]),
        (fnirt, warpbrain, [("fieldcoeff_file", "field_file")]),
        (skullstrip, warpbrainhr, [("out_file", "in_file")]),
        (fnirt, warpbrainhr, [("fieldcoeff_file", "field_file")]),
        (warpbrain, warpreport, [("out_file", "in_file")]),
        (subjectsource, datasink, [("subject_id", "container")]),
        (skullstrip, datasink, [("out_file", "normalization.@brain")]),
        (cvthead, datasink, [("out_file", "normalization.@t1")]),
        (flirt, datasink, [("out_file", "normalization.@brain_flirted")]),
        (flirt, datasink, [("out_matrix_file", "normalization.@affine")]),
        (warpbrain, datasink, [("out_file", "normalization.@brain_warped")]),
        (warpbrainhr, datasink, [("out_file", "normalization.@brain_hires")]),
        (fnirt, datasink, [("fieldcoeff_file", "normalization.@warpfield")]),
        (warpreport, datasink, [("out_file", "normalization.@report")]),
    ])

    return normalize
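A short usage sketch for the workflow above; the subject IDs and working directory are placeholders.

# Illustrative invocation (placeholder subjects and working directory)
wf = create_fsl_workflow(subjects=["subj01", "subj02"])
wf.base_dir = "/tmp/fslwarp_work"
wf.run(plugin="MultiProc", plugin_args={"n_procs": 2})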
Example #11
def preprocess_loc(experiment_dir,
                   subject_id=None,
                   run_id=None,
                   fwhm=4.0,
                   run_num=4,
                   hpcutoff=100.,
                   session_id=None,
                   task_id=None):
    """
    create a workflow for preprocessing and saving the visual localizer data
    """
    preproc = create_featreg_preproc(whichvol=None)
    preproc.inputs.inputspec.fwhm = fwhm
    preproc.inputs.inputspec.highpass = hpcutoff

    if subject_id:
        subject_list = [subject_id]
    else:
        subject_list = sorted([
            path.split(os.path.sep)[-1]
            for path in glob(os.path.join(experiment_dir, 'sub*'))
        ])

    if run_id:
        run_list = [run_id]
    else:
        run_list = []
        for i in range(1, run_num + 1):
            run_list.append('run-' + str(i))

    if session_id:
        session_list = [session_id]
    else:
        session_list = ['ses-localizer']

    if task_id:
        task_list = [task_id]
    else:
        task_list = ['task_objectcategories']

    info_source = pe.Node(util.IdentityInterface(
        fields=['subject_id', 'session_id', 'task_id', 'run_id'],
        mandatory_inputs=False),
                          name='infosource')

    info_source.iterables = [('subject_id', subject_list),
                             ('session_id', session_list),
                             ('task_id', task_list), ('run_id', run_list)]

    templates = {  #'anat':'inputs/tnt/{subject_id}/t1w/brain.nii.gz',
        'func':
        'sourcedata/aligned/{subject_id}/in_bold3Tp2/sub-*_task-objectcategories_{run_id}_bold.nii.gz'
    }

    sf = pe.Node(SelectFiles(templates), name='selectfiles')
    sf.inputs.base_directory = experiment_dir

    datasink = pe.Node(DataSink(), name='datasink')
    datasink.inputs.base_directory = experiment_dir

    # we no longer perform slice-time correction here;
    # it is regarded as superfluous
    #Slicer = pe.Node(fsl.SliceTimer(),
    #                name='Slicer')

    def get_preproc_subs(subject_id, session_id, task_id, run_id):
        subs = [('_subject_id_{}_'.format(subject_id), '')]
        subs.append(('task_id_{}'.format(task_id), ''))
        subs.append(('_run_id_{}'.format(run_id), ''))
        subs.append(('_session_id_{}'.format(session_id), ''))
        subs.append(('_addmean0', ''))
        subs.append(('_dilatemask0', ''))
        subs.append(('_maskfunc30', ''))
        subs.append(('_meanfunc30', ''))
        subs.append(('bold_dtype_bet_thresh_dil',
                     'space-custom-subject_type-brain_mask'))
        subs.append(('bold_dtype_mask_smooth_mask_gms',
                     'space-custom-subject_desc-mean'))
        subs.append(('bold_dtype_mask_smooth_mask',
                     'space-custom-subject_desc-smooth'))
        subs.append(('bold_dtype_mask_smooth_mask_gms_tempfilt_maths',
                     'space-custom-subject_desc-highpass_bold'))
        subs.append(('_mean', ''))
        subs.append(('mean_tempfilt_maths', 'highpass_bold'))
        return subs

    subsgenpreproc = pe.Node(util.Function(
        input_names=['subject_id', 'session_id', 'task_id', 'run_id'],
        output_names=['substitutions'],
        function=get_preproc_subs),
                             name='subsgenpreproc')

    def ds_container_gen(subject_id):
        """
        Generate container for DataSink
        """
        from os.path import join as opj
        container = opj(subject_id, 'ses-localizer')
        return container

    ds_container = pe.Node(util.Function(input_names=['subject_id'],
                                         output_names=['container'],
                                         function=ds_container_gen),
                           name='ds_container')

    preprocwf = pe.Workflow(name='preprocessing')
    preprocwf.connect([
        (info_source, sf, [('subject_id', 'subject_id'), ('run_id', 'run_id'),
                           ('session_id', 'session_id')]),
        (info_source, ds_container, [('subject_id', 'subject_id')]),
        (ds_container, datasink, [('container', 'container')]),
        (info_source, subsgenpreproc, [('subject_id', 'subject_id'),
                                       ('session_id', 'session_id'),
                                       ('task_id', 'task_id'),
                                       ('run_id', 'run_id')]),
        #                        (sf, Slicer, [('func', 'in_file')]),
        #                        (Slicer, preproc,
        #                        [('slice_time_corrected_file', 'inputspec.func')]),
        (sf, preproc, [('func', 'inputspec.func')]),
        (subsgenpreproc, datasink, [('substitutions', 'substitutions')]),
        (preproc, datasink, [('outputspec.smoothed_files', 'func.@smooth')]),
        (preproc, datasink, [('outputspec.mask', 'func.@mask')]),
        (preproc, datasink, [('outputspec.mean', 'func.@mean')]),
        (preproc, datasink, [('outputspec.highpassed_files', 'func.@highpass')
                             ])
    ])
    return preprocwf
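A brief sketch of how this localizer preprocessing workflow might be invoked; the experiment directory and IDs are placeholders.

# Illustrative call (placeholder paths and IDs)
wf = preprocess_loc('/data/studyforrest', subject_id='sub-01', run_id='run-1')
wf.run(plugin='MultiProc', plugin_args={'n_procs': 4})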
Example #12
def main(arglist):
    """Main function for workflow setup and execution."""
    args = parse_args(arglist)

    # Get and process specific information
    project = lyman.gather_project_info()
    exp = lyman.gather_experiment_info(args.experiment, args.altmodel)

    if args.experiment is None:
        args.experiment = project["default_exp"]

    if args.altmodel:
        exp_name = "-".join([args.experiment, args.altmodel])
    else:
        exp_name = args.experiment

    # Make sure some paths are set properly
    os.environ["SUBJECTS_DIR"] = project["data_dir"]

    # Set roots of output storage
    anal_dir_base = op.join(project["analysis_dir"], exp_name)
    work_dir_base = op.join(project["working_dir"], exp_name)
    nipype.config.set("execution", "crashdump_dir", project["crash_dir"])

    ### Set up group info
    ## Regular design
    # Subject source (no iterables here)
    subject_list = lyman.determine_subjects(args.subjects)
    print(subject_list)

    subj_source = Node(IdentityInterface(fields=["subject_id"]),
                       name="subj_source")
    subj_source.inputs.subject_id = subject_list

    # load in covariate for source accuracy analysis
    #     cov = pd.read_csv('/Volumes/group/awagner/sgagnon/AP/results/df_sourceAcc.csv')
    #     cov_col = 'mean_acc'

    # load in covariate for cort analysis
    cov = pd.read_csv(
        '/Volumes/group/awagner/sgagnon/AP/data/cortisol/cort_percentchange_testbaseline_controlassay.csv'
    )
    cov_col = 'cort_controlassay'

    cov = cov.loc[cov.subid.isin(
        subject_list)]  # prune for those in this analysis
    cov[cov_col] = (cov[cov_col] -
                    cov[cov_col].mean()) / cov[cov_col].std()  # zscore
    print(cov.describe())

    cov_reg = [
        cov[cov.subid == x].reset_index().at[0, cov_col] for x in subject_list
    ]

    # Set up the regressors and contrasts
    regressors = dict(group_mean=[int(1) for sub in subject_list],
                      z_covariate=cov_reg)
    print(regressors)

    contrasts = [["cov", "T", ["group_mean", "z_covariate"], [0, 1]]]

    # Subject level contrast source
    contrast_source = Node(IdentityInterface(fields=["l1_contrast"]),
                           iterables=("l1_contrast", exp["contrast_names"]),
                           name="contrast_source")

    # Group workflow
    space = args.regspace
    wf_name = "_".join([space, args.output])
    if space == "mni":
        mfx, mfx_input, mfx_output = wf.create_volume_mixedfx_workflow(
            wf_name, subject_list, regressors, contrasts, exp)
    else:
        print('run mni!')

    # Mixed effects inputs
    ffxspace = "mni" if space == "mni" else "epi"
    ffxsmooth = "unsmoothed" if args.unsmoothed else "smoothed"
    mfx_base = op.join("{subject_id}/ffx/%s/%s/{l1_contrast}" %
                       (ffxspace, ffxsmooth))
    templates = dict(copes=op.join(mfx_base, "cope1.nii.gz"))
    if space == "mni":
        templates.update(
            dict(varcopes=op.join(mfx_base, "varcope1.nii.gz"),
                 dofs=op.join(mfx_base, "tdof_t1.nii.gz")))
    else:
        templates.update(
            dict(reg_file=op.join(anal_dir_base, "{subject_id}/preproc/run_1",
                                  "func2anat_tkreg.dat")))

    # Workflow source node
    mfx_source = MapNode(
        SelectFiles(templates,
                    base_directory=anal_dir_base,
                    sort_filelist=True), "subject_id", "mfx_source")

    # Workflow input connections
    mfx.connect([
        (contrast_source, mfx_source, [("l1_contrast", "l1_contrast")]),
        (contrast_source, mfx_input, [("l1_contrast", "l1_contrast")]),
        (subj_source, mfx_source, [("subject_id", "subject_id")]),
        (mfx_source, mfx_input, [("copes", "copes")])
    ])
    if space == "mni":
        mfx.connect([
            (mfx_source, mfx_input, [("varcopes", "varcopes"),
                                     ("dofs", "dofs")]),
        ])
    else:
        mfx.connect([(mfx_source, mfx_input, [("reg_file", "reg_file")]),
                     (subj_source, mfx_input, [("subject_id", "subject_id")])])

    # Mixed effects outputs
    mfx_sink = Node(DataSink(base_directory="/".join(
        [anal_dir_base, args.output, space]),
                             substitutions=[("/stats", "/"), ("/_hemi_", "/"),
                                            ("_glm_results", "")],
                             parameterization=True),
                    name="mfx_sink")

    mfx_outwrap = tools.OutputWrapper(mfx, subj_source, mfx_sink, mfx_output)
    mfx_outwrap.sink_outputs()
    mfx_outwrap.set_mapnode_substitutions(1)
    mfx_outwrap.add_regexp_substitutions([(r"_l1_contrast_[-\w]*/", "/"),
                                          (r"_mni_hemi_[lr]h", "")])
    mfx.connect(contrast_source, "l1_contrast", mfx_sink, "container")

    # Set a few last things
    mfx.base_dir = work_dir_base

    # Execute
    lyman.run_workflow(mfx, args=args)

    # Clean up
    if project["rm_working_dir"]:
        shutil.rmtree(project["working_dir"])
Example #13
def main(arglist):
    """Main function for workflow setup and execution."""
    args = parse_args(arglist)

    # Get and process specific information
    project = lyman.gather_project_info()
    exp = lyman.gather_experiment_info(args.experiment, args.altmodel, args)

    # Set up the SUBJECTS_DIR for Freesurfer
    os.environ["SUBJECTS_DIR"] = project["data_dir"]

    # Subject is always highest level of parameterization
    subject_list = lyman.determine_subjects(args.subjects)
    subj_source = tools.make_subject_source(subject_list)

    # Get the full correct name for the experiment
    if args.experiment is None:
        exp_name = project["default_exp"]
    else:
        exp_name = args.experiment

    exp_base = exp_name
    if args.altmodel is not None:
        exp_name = "-".join([exp_base, args.altmodel])

    # Set roots of output storage
    data_dir = project["data_dir"]
    analysis_dir = op.join(project["analysis_dir"], exp_name)
    working_dir = op.join(project["working_dir"], exp_name)
    nipype.config.set("execution", "crashdump_dir", project["crash_dir"])

    # Create symlinks to the preproc directory for altmodels
    if not op.exists(analysis_dir):
        os.makedirs(analysis_dir)
    if exp_base != exp_name:
        for subj in subject_list:
            subj_dir = op.join(analysis_dir, subj)
            if not op.exists(subj_dir):
                os.makedirs(subj_dir)
            link_dir = op.join(analysis_dir, subj, "preproc")
            if not op.exists(link_dir):
                preproc_dir = op.join("../..", exp_base, subj, "preproc")
                os.symlink(preproc_dir, link_dir)

    # For later processing steps, are we using smoothed inputs?
    smoothing = "unsmoothed" if args.unsmoothed else "smoothed"

    # Also define the regspace variable here
    space = args.regspace

    # ----------------------------------------------------------------------- #
    # Preprocessing Workflow
    # ----------------------------------------------------------------------- #

    # Create workflow in function defined elsewhere in this package
    preproc, preproc_input, preproc_output = wf.create_preprocessing_workflow(
        exp_info=exp)

    # Collect raw nifti data
    preproc_templates = dict(timeseries=exp["source_template"])
    if exp["partial_brain"]:
        preproc_templates["whole_brain"] = exp["whole_brain_template"]
    if exp["fieldmap_template"]:
        preproc_templates["fieldmap"] = exp["fieldmap_template"]

    preproc_source = Node(
        SelectFiles(preproc_templates, base_directory=project["data_dir"]),
        "preproc_source")

    # Convenience class to handle some stereotyped connections
    # between run-specific nodes (defined here) and the inputs
    # to the prepackaged workflow returned above
    preproc_inwrap = tools.InputWrapper(preproc, subj_source, preproc_source,
                                        preproc_input)
    preproc_inwrap.connect_inputs()

    # Store workflow outputs to persistent location
    preproc_sink = Node(DataSink(base_directory=analysis_dir), "preproc_sink")

    # Similar to above, class to handle stereotyped output connections
    preproc_outwrap = tools.OutputWrapper(preproc, subj_source, preproc_sink,
                                          preproc_output)
    preproc_outwrap.set_subject_container()
    preproc_outwrap.set_mapnode_substitutions(exp["n_runs"])
    preproc_outwrap.sink_outputs("preproc")

    # Set the base for the possibly temporary working directory
    preproc.base_dir = working_dir

    # Possibly execute the workflow, depending on the command line
    lyman.run_workflow(preproc, "preproc", args)

    # ----------------------------------------------------------------------- #
    # Timeseries Model
    # ----------------------------------------------------------------------- #

    # Create a modelfitting workflow and specific nodes as above
    model, model_input, model_output = wf.create_timeseries_model_workflow(
        name=smoothing + "_model", exp_info=exp)

    model_base = op.join(analysis_dir, "{subject_id}/preproc/run_*/")
    model_templates = dict(
        timeseries=op.join(model_base, smoothing + "_timeseries.nii.gz"),
        realign_file=op.join(model_base, "realignment_params.csv"),
        nuisance_file=op.join(model_base, "nuisance_variables.csv"),
        artifact_file=op.join(model_base, "artifacts.csv"),
    )

    if exp["design_name"] is not None:
        design_file = exp["design_name"] + ".csv"
        regressor_file = exp["design_name"] + ".csv"
        model_templates["design_file"] = op.join(data_dir, "{subject_id}",
                                                 "design", design_file)
    if exp["regressor_file"] is not None:
        regressor_file = exp["regressor_file"] + ".csv"
        model_templates["regressor_file"] = op.join(data_dir, "{subject_id}",
                                                    "design", regressor_file)

    model_source = Node(SelectFiles(model_templates), "model_source")

    model_inwrap = tools.InputWrapper(model, subj_source, model_source,
                                      model_input)
    model_inwrap.connect_inputs()

    model_sink = Node(DataSink(base_directory=analysis_dir), "model_sink")

    model_outwrap = tools.OutputWrapper(model, subj_source, model_sink,
                                        model_output)
    model_outwrap.set_subject_container()
    model_outwrap.set_mapnode_substitutions(exp["n_runs"])
    model_outwrap.sink_outputs("model." + smoothing)

    # Set temporary output locations
    model.base_dir = working_dir

    # Possibly execute the workflow
    lyman.run_workflow(model, "model", args)

    # ----------------------------------------------------------------------- #
    # Across-Run Registration
    # ----------------------------------------------------------------------- #

    # Is this a model or timeseries registration?
    regtype = "timeseries" if (args.timeseries or args.residual) else "model"

    # Are we registering across experiments?
    cross_exp = args.regexp is not None

    # Retrieve the right workflow function for registration
    # Get the workflow function dynamically based on the space
    warp_method = project["normalization"]
    flow_name = "%s_%s_reg" % (space, regtype)
    reg, reg_input, reg_output = wf.create_reg_workflow(
        flow_name, space, regtype, warp_method, args.residual, cross_exp)

    # Define a smoothing info node here. Use an iterable so that running
    # with/without smoothing doesn't clobber working directory files
    # for the other kind of execution
    smooth_source = Node(IdentityInterface(fields=["smoothing"]),
                         iterables=("smoothing", [smoothing]),
                         name="smooth_source")

    # Set up the registration inputs and templates
    reg_templates = dict(
        masks="{subject_id}/preproc/run_*/functional_mask.nii.gz",
        means="{subject_id}/preproc/run_*/mean_func.nii.gz",
    )

    if regtype == "model":
        # First-level model summary statistic images
        reg_base = "{subject_id}/model/{smoothing}/run_*/"
        reg_templates.update(
            dict(
                copes=op.join(reg_base, "cope*.nii.gz"),
                varcopes=op.join(reg_base, "varcope*.nii.gz"),
                sumsquares=op.join(reg_base, "ss*.nii.gz"),
            ))
    else:
        # Timeseries images
        if args.residual:
            ts_file = op.join("{subject_id}/model/{smoothing}/run_*/",
                              "results/res4d.nii.gz")
        else:
            ts_file = op.join("{subject_id}/preproc/run_*/",
                              "{smoothing}_timeseries.nii.gz")
        reg_templates.update(dict(timeseries=ts_file))
    reg_lists = list(reg_templates.keys())

    # Native anatomy to group anatomy affine matrix and warpfield
    if space == "mni":
        aff_ext = "mat" if warp_method == "fsl" else "txt"
        reg_templates["warpfield"] = op.join(data_dir, "{subject_id}",
                                             "normalization/warpfield.nii.gz")
        reg_templates["affine"] = op.join(data_dir, "{subject_id}",
                                          "normalization/affine." + aff_ext)
    else:
        if args.regexp is None:
            tkreg_base = analysis_dir
        else:
            tkreg_base = op.join(project["analysis_dir"], args.regexp)
        reg_templates["tkreg_rigid"] = op.join(tkreg_base, "{subject_id}",
                                               "preproc", "run_1",
                                               "func2anat_tkreg.dat")

    # Rigid (6dof) functional-to-anatomical matrices
    rigid_stem = "{subject_id}/preproc/run_*/func2anat_"
    if warp_method == "ants" and space == "mni":
        reg_templates["rigids"] = rigid_stem + "tkreg.dat"
    else:
        reg_templates["rigids"] = rigid_stem + "flirt.mat"

    # Rigid matrix from anatomy to target experiment space
    if args.regexp is not None:
        targ_analysis_dir = op.join(project["analysis_dir"], args.regexp)
        reg_templates["first_rigid"] = op.join(targ_analysis_dir,
                                               "{subject_id}", "preproc",
                                               "run_1", "func2anat_flirt.mat")

    # Define the registration data source node
    reg_source = Node(
        SelectFiles(reg_templates,
                    force_lists=reg_lists,
                    base_directory=analysis_dir), "reg_source")

    # Registration inputnode
    reg_inwrap = tools.InputWrapper(reg, subj_source, reg_source, reg_input)
    reg_inwrap.connect_inputs()

    # The source node also needs to know about the smoothing on this run
    reg.connect(smooth_source, "smoothing", reg_source, "smoothing")

    # Set up the registration output and datasink
    reg_sink = Node(DataSink(base_directory=analysis_dir), "reg_sink")

    reg_outwrap = tools.OutputWrapper(reg, subj_source, reg_sink, reg_output)
    reg_outwrap.set_subject_container()
    reg_outwrap.sink_outputs("reg.%s" % space)

    # Reg has some additional substitutions to strip out iterables
    # and rename the timeseries file
    reg_subs = [("_smoothing_", "")]
    reg_outwrap.add_regexp_substitutions(reg_subs)

    # Add dummy substitutions for the contrasts to make sure the DataSink
    # reruns when the design has changed. This accounts for the problem where
    # directory inputs are treated as strings and the contents/timestamps are
    # not hashed, which should be fixed upstream soon.
    contrast_subs = [(c, c) for c in exp["contrast_names"]]
    reg_outwrap.add_regexp_substitutions(contrast_subs)

    reg.base_dir = working_dir

    # Possibly run registration workflow and clean up
    lyman.run_workflow(reg, "reg", args)

    # ----------------------------------------------------------------------- #
    # Across-Run Fixed Effects Model
    # ----------------------------------------------------------------------- #

    # Dynamically get the workflow
    wf_name = space + "_ffx"
    ffx, ffx_input, ffx_output = wf.create_ffx_workflow(wf_name,
                                                        space,
                                                        exp["contrast_names"],
                                                        exp_info=exp)

    ext = "_warp.nii.gz" if space == "mni" else "_xfm.nii.gz"
    ffx_base = op.join("{subject_id}/reg", space, "{smoothing}/run_*")
    ffx_templates = dict(
        copes=op.join(ffx_base, "cope*" + ext),
        varcopes=op.join(ffx_base, "varcope*" + ext),
        masks=op.join(ffx_base, "functional_mask" + ext),
        means=op.join(ffx_base, "mean_func" + ext),
        dofs="{subject_id}/model/{smoothing}/run_*/results/dof",
        ss_files=op.join(ffx_base, "ss*" + ext),
        timeseries="{subject_id}/preproc/run_*/{smoothing}_timeseries.nii.gz",
    )
    ffx_lists = list(ffx_templates.keys())

    # Space-conditional inputs
    if space == "mni":
        bg = op.join(data_dir, "{subject_id}/normalization/brain_warp.nii.gz")
        reg = op.join(os.environ["FREESURFER_HOME"],
                      "average/mni152.register.dat")
    else:
        reg_dir = "{subject_id}/reg/epi/{smoothing}/run_1"
        bg = op.join(reg_dir, "mean_func_xfm.nii.gz")
        reg = op.join(reg_dir, "func2anat_tkreg.dat")
    ffx_templates["anatomy"] = bg
    ffx_templates["reg_file"] = reg

    # Define the fixed effects data source node
    ffx_source = Node(
        SelectFiles(ffx_templates,
                    force_lists=ffx_lists,
                    base_directory=analysis_dir), "ffx_source")

    # Fixed effects input node
    ffx_inwrap = tools.InputWrapper(ffx, subj_source, ffx_source, ffx_input)
    ffx_inwrap.connect_inputs()

    # Connect the smoothing information
    ffx.connect(smooth_source, "smoothing", ffx_source, "smoothing")

    # Fixed effects output and datasink
    ffx_sink = Node(DataSink(base_directory=analysis_dir), "ffx_sink")

    ffx_outwrap = tools.OutputWrapper(ffx, subj_source, ffx_sink, ffx_output)
    ffx_outwrap.set_subject_container()
    ffx_outwrap.sink_outputs("ffx.%s" % space)

    # Fixed effects has some additional substitutions to strip out iterables
    ffx_outwrap.add_regexp_substitutions([("_smoothing_", ""),
                                          ("flamestats", "")])

    ffx.base_dir = working_dir

    # Possibly run fixed effects workflow
    lyman.run_workflow(ffx, "ffx", args)

    # -------- #
    # Clean-up
    # -------- #

    if project["rm_working_dir"]:
        shutil.rmtree(working_dir)
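# A minimal standalone sketch (not part of the script above) of how DataSink
# renaming behaves: plain substitutions are literal string replacements applied
# to every output path, and regexp_substitutions are applied afterwards, in
# order.  The base directory and patterns here are illustrative only.
from nipype import Node
from nipype.interfaces.io import DataSink

demo_sink = Node(DataSink(base_directory='/tmp/analysis'), name='demo_sink')
demo_sink.inputs.substitutions = [('_smoothing_', '')]
demo_sink.inputs.regexp_substitutions = [(r'run_(\d+)', r'run-\1')]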
Example #14
def workflow_manager(project, exp, args, subj_source):
    """
    # ---------------------- #
    # {workflow_name} Workflow #
    # ---------------------- #

    Each workflow.py file should define a **workflow** method that takes the
    following options and returns a nipype workflow to execute:
       *def workflow(project, exp, args, subj_source)*:

    project
      A dictionary containing information about the directories in the project
      (e.g. working_dir, analysis_dir, data_dir)

    exp
      A dictionary containing information from the experiment config file,
      (e.g. options regarding slice-time correction, model contrasts)

    args
      An ``argparse.Parser`` instance containing command line options from
      ``fitz run``.

    subj_source
      A nipype node containing an IdentityInterface iterable for subjects,
      with the subject label stored as the output {{subject_id}}.

    For more info, see: http://people.fas.harvard.edu/~kastman/fitz/creating_fitz_pipelines.html
    """

    # Define Input Templates
    templates = dict(
        # timeseries=exp['timeseries'],
    )

    # Create Datasource
    source = Node(SelectFiles(templates, base_directory=project["data_dir"]),
                  "{workflow_name}_source")

    # Create main workflow and grab input and output nodes.
    wf, input_node, output_node = workflow_spec(exp_info=exp)

    # Convenience class to handle some stereotyped connections
    # between run-specific nodes (defined here) and the inputs
    # to the prepackaged workflow returned above
    inwrap = fitz.tools.InputWrapper(wf, subj_source, source, input_node)
    inwrap.connect_inputs()

    # Store workflow outputs to persistent location
    sink = Node(DataSink(base_directory=project['analysis_dir']),
                "{workflow_name}_sink")

    # Similar to above, class to handle stereotyped output connections
    outwrap = fitz.tools.OutputWrapper(wf, subj_source, sink, output_node)
    outwrap.set_subject_container()
    # outwrap.set_mapnode_substitutions(exp["n_runs"])
    outwrap.sink_outputs('outdir')

    # Set the base for the possibly temporary working directory
    wf.base_dir = project['working_dir']

    return wf
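# Small illustrative sketch of the SelectFiles pattern used above: each key of
# the templates dict becomes an output field, and "{subject_id}" is filled in
# from whatever is connected to (or set on) the node.  Paths are hypothetical.
from nipype import Node
from nipype.interfaces.io import SelectFiles

demo_templates = dict(timeseries="{subject_id}/func/run_*_bold.nii.gz")
demo_source = Node(SelectFiles(demo_templates, base_directory="/data/project"),
                   name="demo_source")
demo_source.inputs.subject_id = "sub01"   # normally connected from subj_source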
Example #15
def group_onesample_openfmri(dataset_dir,
                             model_id=None,
                             task_id=None,
                             l1output_dir=None,
                             out_dir=None,
                             no_reversal=False):

    wk = Workflow(name='one_sample')
    wk.base_dir = os.path.abspath(work_dir)

    info = Node(
        util.IdentityInterface(fields=['model_id', 'task_id', 'dataset_dir']),
        name='infosource')
    info.inputs.model_id = model_id
    info.inputs.task_id = task_id
    info.inputs.dataset_dir = dataset_dir

    num_copes = contrasts_num(model_id, task_id, dataset_dir)

    dg = Node(DataGrabber(infields=['model_id', 'task_id', 'cope_id'],
                          outfields=['copes', 'varcopes']),
              name='grabber')
    dg.inputs.template = os.path.join(
        l1output_dir, 'model%03d/task%03d/*/%scopes/mni/%scope%02d.nii.gz')
    dg.inputs.template_args['copes'] = [[
        'model_id', 'task_id', '', '', 'cope_id'
    ]]
    dg.inputs.template_args['varcopes'] = [[
        'model_id', 'task_id', 'var', 'var', 'cope_id'
    ]]
    dg.iterables = ('cope_id', num_copes)

    dg.inputs.sort_filelist = True

    wk.connect(info, 'model_id', dg, 'model_id')
    wk.connect(info, 'task_id', dg, 'task_id')

    model = Node(L2Model(), name='l2model')

    wk.connect(dg, ('copes', get_len), model, 'num_copes')

    mergecopes = Node(Merge(dimension='t'), name='merge_copes')
    wk.connect(dg, 'copes', mergecopes, 'in_files')

    mergevarcopes = Node(Merge(dimension='t'), name='merge_varcopes')
    wk.connect(dg, 'varcopes', mergevarcopes, 'in_files')

    mask_file = fsl.Info.standard_image('MNI152_T1_2mm_brain_mask.nii.gz')
    flame = Node(FLAMEO(), name='flameo')
    flame.inputs.mask_file = mask_file
    flame.inputs.run_mode = 'flame1'

    wk.connect(model, 'design_mat', flame, 'design_file')
    wk.connect(model, 'design_con', flame, 't_con_file')
    wk.connect(mergecopes, 'merged_file', flame, 'cope_file')
    wk.connect(mergevarcopes, 'merged_file', flame, 'var_cope_file')
    wk.connect(model, 'design_grp', flame, 'cov_split_file')

    smoothest = Node(SmoothEstimate(), name='smooth_estimate')
    wk.connect(flame, 'zstats', smoothest, 'zstat_file')
    smoothest.inputs.mask_file = mask_file

    cluster = Node(Cluster(), name='cluster')
    wk.connect(smoothest, 'dlh', cluster, 'dlh')
    wk.connect(smoothest, 'volume', cluster, 'volume')
    cluster.inputs.connectivity = 26
    cluster.inputs.threshold = 2.3
    cluster.inputs.pthreshold = 0.05
    cluster.inputs.out_threshold_file = True
    cluster.inputs.out_index_file = True
    cluster.inputs.out_localmax_txt_file = True

    wk.connect(flame, 'zstats', cluster, 'in_file')

    ztopval = Node(ImageMaths(op_string='-ztop', suffix='_pval'),
                   name='z2pval')
    wk.connect(flame, 'zstats', ztopval, 'in_file')

    sinker = Node(DataSink(), name='sinker')
    sinker.inputs.base_directory = os.path.abspath(out_dir)
    sinker.inputs.substitutions = [('_cope_id', 'contrast'),
                                   ('_maths__', '_reversed_')]

    wk.connect(flame, 'zstats', sinker, 'stats')
    wk.connect(cluster, 'threshold_file', sinker, 'stats.@thr')
    wk.connect(cluster, 'index_file', sinker, 'stats.@index')
    wk.connect(cluster, 'localmax_txt_file', sinker, 'stats.@localmax')

    if not no_reversal:
        zstats_reverse = Node(BinaryMaths(), name='zstats_reverse')
        zstats_reverse.inputs.operation = 'mul'
        zstats_reverse.inputs.operand_value = -1
        wk.connect(flame, 'zstats', zstats_reverse, 'in_file')

        cluster2 = cluster.clone(name='cluster2')
        wk.connect(smoothest, 'dlh', cluster2, 'dlh')
        wk.connect(smoothest, 'volume', cluster2, 'volume')
        wk.connect(zstats_reverse, 'out_file', cluster2, 'in_file')

        ztopval2 = ztopval.clone(name='ztopval2')
        wk.connect(zstats_reverse, 'out_file', ztopval2, 'in_file')

        wk.connect(zstats_reverse, 'out_file', sinker, 'stats.@neg')
        wk.connect(cluster2, 'threshold_file', sinker, 'stats.@neg_thr')
        wk.connect(cluster2, 'index_file', sinker, 'stats.@neg_index')
        wk.connect(cluster2, 'localmax_txt_file', sinker,
                   'stats.@neg_localmax')

    return wk
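# Hedged usage sketch for group_onesample_openfmri above: it assumes the
# surrounding module already defines work_dir, contrasts_num and get_len
# (all referenced inside the function), and every path is a placeholder.
one_sample_wf = group_onesample_openfmri(dataset_dir="/data/ds000001",
                                         model_id=1,
                                         task_id=1,
                                         l1output_dir="/data/ds000001/l1output",
                                         out_dir="/data/ds000001/group")
one_sample_wf.run(plugin="MultiProc", plugin_args={"n_procs": 4})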
def batch_paramatric_GLM(nii_root_dir, sub_num_list, total_session_num,
                         all_sub_dataframe, params_name, contrast_list,
                         cache_folder, result_folder, parallel_cores):

    from nipype import Node, Workflow, Function
    from nipype.interfaces.spm import Level1Design, EstimateModel, EstimateContrast
    from nipype.algorithms.modelgen import SpecifySPMModel
    from nipype.interfaces.utility import IdentityInterface
    from nipype import DataSink

    # Define the helper functions

    def nii_selector(root_dir,
                     sub_num,
                     session_num,
                     all_sub_dataframe,
                     data_type="Smooth_8mm"):
        import os
        import glob
        session_list = ["session" + str(i) for i in range(1, session_num + 1)]
        sub_name = "sub" + str(sub_num)
        # print(file_path)
        nii_list = []
        for s in session_list:
            file_path = os.path.join(root_dir, sub_name, data_type, s)
            nii_list.append(glob.glob(file_path + "/*.nii"))
        single_sub_data = all_sub_dataframe[all_sub_dataframe.Subject_num ==
                                            sub_num]
        return (nii_list, single_sub_data, sub_name)

    def condition_generator(single_sub_data, params_name, duration=2):
        from nipype.interfaces.base import Bunch
        run_num = set(single_sub_data.run)
        subject_info = []
        for i in run_num:
            tmp_table = single_sub_data[single_sub_data.run == i]
            tmp_onset = tmp_table.onset.values.tolist()

            pmod_names = []
            pmod_params = []
            pmod_poly = []
            for param in params_name:
                pmod_params.append(tmp_table[param].values.tolist())
                pmod_names.append(param)
                pmod_poly.append(1)

            tmp_Bunch = Bunch(conditions=["trial_onset_run" + str(i)],
                              onsets=[tmp_onset],
                              durations=[[duration]],
                              pmod=[
                                  Bunch(name=pmod_names,
                                        poly=pmod_poly,
                                        param=pmod_params)
                              ])
            subject_info.append(tmp_Bunch)

        return subject_info

    # Define each Nodes in the workflow

    NiiSelector = Node(Function(
        input_names=[
            "root_dir", "sub_num", "session_num", "all_sub_dataframe",
            "data_type"
        ],
        output_names=["nii_list", "single_sub_data", "sub_name"],
        function=nii_selector),
                       name="NiiSelector")

    ConditionGenerator = Node(Function(
        input_names=["single_sub_data", "params_name", "duration"],
        output_names=["subject_info"],
        function=condition_generator),
                              name="ConditionGenerator")

    glm_input = Node(IdentityInterface(
        fields=['nii_list', 'single_sub_data', 'params_name', 'contrast_list'],
        mandatory_inputs=True),
                     name="glm_input")

    # SpecifyModel - Generates SPM-specific Model
    modelspec = Node(SpecifySPMModel(concatenate_runs=False,
                                     input_units='scans',
                                     output_units='scans',
                                     time_repetition=2,
                                     high_pass_filter_cutoff=128),
                     name="modelspec")

    # Level1Design - Generates an SPM design matrix
    level1design = Node(Level1Design(bases={'hrf': {
        'derivs': [0, 0]
    }},
                                     timing_units='scans',
                                     interscan_interval=2),
                        name="level1design")

    level1estimate = Node(EstimateModel(estimation_method={'Classical': 1}),
                          name="level1estimate")

    level1conest = Node(EstimateContrast(), name="level1conest")

    OutputNode = Node(DataSink(), name="OutputNode")

    # Define the attributes of those nodes

    NiiSelector.inputs.root_dir = nii_root_dir
    NiiSelector.iterables = ("sub_num", sub_num_list)
    NiiSelector.inputs.session_num = total_session_num
    NiiSelector.inputs.data_type = "Smooth_8mm"
    NiiSelector.inputs.all_sub_dataframe = all_sub_dataframe

    glm_input.inputs.params_name = params_name
    glm_input.inputs.contrast_list = contrast_list

    OutputNode.inputs.base_directory = result_folder

    # Define the workflows

    single_sub_GLM_wf = Workflow(name='single_sub_GLM_wf')
    single_sub_GLM_wf.connect([
        (glm_input, ConditionGenerator, [('single_sub_data',
                                          'single_sub_data'),
                                         ('params_name', 'params_name')]),
        (glm_input, modelspec, [('nii_list', 'functional_runs')]),
        (glm_input, level1conest, [('contrast_list', 'contrasts')]),
        (ConditionGenerator, modelspec, [('subject_info', 'subject_info')]),
        (modelspec, level1design, [('session_info', 'session_info')]),
        (level1design, level1estimate, [('spm_mat_file', 'spm_mat_file')]),
        (level1estimate, level1conest, [('spm_mat_file', 'spm_mat_file'),
                                        ('beta_images', 'beta_images'),
                                        ('residual_image', 'residual_image')])
    ])

    batch_GLM_wf = Workflow(name="batch_GLM_wf", base_dir=cache_folder)
    batch_GLM_wf.connect([(NiiSelector, single_sub_GLM_wf, [
        ('nii_list', 'glm_input.nii_list'),
        ('single_sub_data', 'glm_input.single_sub_data')
    ]), (NiiSelector, OutputNode, [('sub_name', 'container')]),
                          (single_sub_GLM_wf, OutputNode,
                           [('level1conest.spm_mat_file', '1stLevel.@spm_mat'),
                            ('level1conest.spmT_images', '1stLevel.@T'),
                            ('level1conest.con_images', '1stLevel.@con'),
                            ('level1conest.spmF_images', '1stLevel.@F'),
                            ('level1conest.ess_images', '1stLevel.@ess')])])

    # Execute the workflow
    batch_GLM_wf.run(plugin='MultiProc',
                     plugin_args={'n_procs': parallel_cores})
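# Hedged sketch of the contrast_list format the function above passes to SPM's
# EstimateContrast: (name, stat, condition_names, weights) tuples.  The
# condition and parametric-modulator names below are illustrative only; the
# real names depend on params_name and the run-specific onset conditions.
example_contrast_list = [
    ('trial_onset_run1', 'T', ['trial_onset_run1'], [1]),
    ('value_modulation_run1', 'T', ['trial_onset_run1xvalue^1'], [1]),
]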
Example #17
    def make_workflow(self):
        # Infosource: Iterate through subject names
        infosource = Node(interface=IdentityInterface(fields=['subject_str']),
                          name="infosource")
        infosource.iterables = ('subject_str', self.subject_list)

        # Infosource: Iterate through subject names
        #imgsrc = Node(interface=IdentityInterface(fields=['img']), name="imgsrc")
        #imgsrc.iterables = ('img', ['uni'])

        parse_scanner_dir = Node(
            interface=ParseScannerDir(raw_data_dir=self.raw_data_dir),
            name='parse_scanner_dir')

        ro = Node(interface=fsl.Reorient2Std(), name='ro')

        mv_uni = Node(interface=Rename(format_string='uni_'), name='mv_uni')
        mv_uniden = Node(interface=Rename(format_string='uniden'),
                         name='mv_uniden')
        mv_flair = Node(interface=Rename(format_string='flair'),
                        name='mv_flair')
        mv_bold = Node(interface=Rename(format_string='bold_'), name='mv_bold')
        mv_boldmag1 = Node(interface=Rename(format_string='boldmag1'),
                           name='mv_boldmag1')
        mv_boldmag2 = Node(interface=Rename(format_string='boldmag2'),
                           name='mv_boldmag2')
        mv_phasediff = Node(interface=Rename(format_string='boldphdiff'),
                            name='mv_phasediff')
        sink = Node(interface=DataSink(), name='sink')
        sink.inputs.base_directory = self.bids_root
        #sink.inputs.substitutions = [('mp2rage075iso', '{}'.format(str(sink.inputs._outputs.keys()))),
        #                              ('uni', 'uni.nii.gz')]#,
        #                              ('_uniden_DEN', ''),
        #                              ('DEN_mp2rage_orig_reoriented_masked_maths', 'mUNIbrain_DENskull_SPMmasked'),
        #                              ('_mp2rage_orig_reoriented_maths_maths_bin', '_brain_bin')]
        sink.inputs.regexp_substitutions = [
            (r'_subject_str_2(?P<subid>[0-9][0-9][0-9])T(?P<sesid>[0-9])/uni_',
             r'sub-NeuroMET\g<subid>/ses-0\g<sesid>/anat/sub-NeuroMET\g<subid>_ses-0\g<sesid>_T1w.nii.gz'
             ),
            (r'_subject_str_2(?P<subid>[0-9][0-9][0-9])T(?P<sesid>[0-9])/uniden',
             r'/derivatives/Siemens/sub-NeuroMET\g<subid>/ses-0\g<sesid>/anat/sub-NeuroMET\g<subid>_ses-0\g<sesid>_desc-UNIDEN.nii.gz'
             ),
            (r'_subject_str_2(?P<subid>[0-9][0-9][0-9])T(?P<sesid>[0-9])/flair',
             r'sub-NeuroMET\g<subid>/ses-0\g<sesid>/anat/sub-NeuroMET\g<subid>_ses-0\g<sesid>_FLAIR.nii.gz'
             ),
            (r'_subject_str_2(?P<subid>[0-9][0-9][0-9])T(?P<sesid>[0-9])/bold_',
             r'sub-NeuroMET\g<subid>/ses-0\g<sesid>/func/sub-NeuroMET\g<subid>_ses-0\g<sesid>_task-rest_bold.nii.gz'
             ),
            (r'_subject_str_2(?P<subid>[0-9][0-9][0-9])T(?P<sesid>[0-9])/boldmag1',
             r'sub-NeuroMET\g<subid>/ses-0\g<sesid>/fmap/sub-NeuroMET\g<subid>_ses-0\g<sesid>_magnitude1.nii.gz'
             ),
            (r'_subject_str_2(?P<subid>[0-9][0-9][0-9])T(?P<sesid>[0-9])/boldmag2',
             r'sub-NeuroMET\g<subid>/ses-0\g<sesid>/fmap/sub-NeuroMET\g<subid>_ses-0\g<sesid>_magnitude2.nii.gz'
             ),
            (r'_subject_str_2(?P<subid>[0-9][0-9][0-9])T(?P<sesid>[0-9])/boldphdiff',
             r'sub-NeuroMET\g<subid>/ses-0\g<sesid>/fmap/sub-NeuroMET\g<subid>_ses-0\g<sesid>_phasediff.nii.gz'
             ),
        ]
        #    (r'c1{prefix}(.*).UNI_brain_bin.nii.gz'.format(prefix=self.project_prefix),
        #                                      r'{prefix}\1.UNI_brain_bin.nii.gz'.format(prefix=self.project_prefix)),
        #                                     (r'c1{prefix}(.*).DEN_brain_bin.nii.gz'.format(prefix=self.project_prefix),
        #                                      r'{prefix}\1.DEN_brain_bin.nii.gz'.format(prefix=self.project_prefix))]

        scanner_to_bids = Workflow(name='scanner_to_bids',
                                   base_dir=self.temp_dir)
        #scanner_to_bids.connect(imgsrc, 'img', mv, 'format_string')
        scanner_to_bids.connect(infosource, 'subject_str', parse_scanner_dir,
                                'subject_id')
        scanner_to_bids.connect(parse_scanner_dir, 'uni', mv_uni, 'in_file')
        scanner_to_bids.connect(parse_scanner_dir, 'uniden', mv_uniden,
                                'in_file')
        scanner_to_bids.connect(parse_scanner_dir, 'flair', mv_flair,
                                'in_file')
        scanner_to_bids.connect(parse_scanner_dir, 'bold', mv_bold, 'in_file')
        scanner_to_bids.connect(parse_scanner_dir, 'boldmag1', mv_boldmag1,
                                'in_file')
        scanner_to_bids.connect(parse_scanner_dir, 'boldmag2', mv_boldmag2,
                                'in_file')
        scanner_to_bids.connect(parse_scanner_dir, 'boldphdiff', mv_phasediff,
                                'in_file')
        scanner_to_bids.connect(mv_uni, 'out_file', sink, '@uni')
        scanner_to_bids.connect(mv_uniden, 'out_file', sink, '@uniden')
        scanner_to_bids.connect(mv_flair, 'out_file', sink, '@flair')
        scanner_to_bids.connect(mv_bold, 'out_file', sink, '@bold')
        scanner_to_bids.connect(mv_boldmag1, 'out_file', sink, '@boldmag1')
        scanner_to_bids.connect(mv_boldmag2, 'out_file', sink, '@boldmag2')
        scanner_to_bids.connect(mv_phasediff, 'out_file', sink, '@phasediff')

        return scanner_to_bids
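# Quick check (outside the workflow) of what the first regexp substitution
# above does to a parameterized DataSink path; the input path is a made-up
# example of the folder names the iterable subject source produces.
import re

pattern = r'_subject_str_2(?P<subid>[0-9][0-9][0-9])T(?P<sesid>[0-9])/uni_'
replacement = (r'sub-NeuroMET\g<subid>/ses-0\g<sesid>/anat/'
               r'sub-NeuroMET\g<subid>_ses-0\g<sesid>_T1w.nii.gz')
print(re.sub(pattern, replacement, '_subject_str_2042T1/uni_'))
# -> sub-NeuroMET042/ses-01/anat/sub-NeuroMET042_ses-01_T1w.nii.gz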
Example #18
def create_ants_workflow(data_dir=None, subjects=None, name="antswarp"):
    """Set up the anatomical normalzation workflow using ANTS.

    Your anatomical data must have been processed in Freesurfer.
    Unlike most lyman workflows, the DataGrabber and DataSink
    nodes are hardwired within the returned workflow, as this
    tightly integrates with the Freesurfer subjects directory
    structure.

    Parameters
    ----------
    data_dir : path
        top level of data hierarchy/FS subjects directory
    subjects : list of strings
        list of subject IDs
    name : alphanumeric string, optional
        workflow name

    """
    if data_dir is None:
        data_dir = os.environ["SUBJECTS_DIR"]
    if subjects is None:
        subjects = []

    # Subject source node
    subjectsource = Node(IdentityInterface(fields=["subject_id"]),
                         iterables=("subject_id", subjects),
                         name="subjectsource")

    # Grab recon-all outputs
    templates = dict(aseg="{subject_id}/mri/aparc+aseg.mgz",
                     head="{subject_id}/mri/brain.mgz")
    datasource = Node(SelectFiles(templates, base_directory=data_dir),
                      "datasource")

    # Convert images to nifti storage and float representation
    cvtaseg = Node(fs.MRIConvert(out_type="niigz"), "convertaseg")

    cvtbrain = Node(fs.MRIConvert(out_type="niigz", out_datatype="float"),
                    "convertbrain")

    # Turn the aparc+aseg into a brainmask
    makemask = Node(fs.Binarize(dilate=4, erode=3, min=0.5), "makemask")

    # Extract the brain from the orig.mgz using the mask
    skullstrip = Node(fsl.ApplyMask(), "skullstrip")

    # Normalize using ANTS
    antswarp = Node(ANTSIntroduction(), "antswarp")

    # Generate a png summarizing the registration
    warpreport = Node(WarpReport(), "warpreport")

    # Save relevant files to the data directory
    datasink = Node(DataSink(base_directory=data_dir, parameterization=False),
                    name="datasink")

    # Define and connect the workflow
    # -------------------------------

    normalize = Workflow(name=name)

    normalize.connect([
        (subjectsource, datasource, [("subject_id", "subject_id")]),
        (datasource, cvtaseg, [("aseg", "in_file")]),
        (datasource, cvtbrain, [("head", "in_file")]),
        (cvtaseg, makemask, [("out_file", "in_file")]),
        (cvtbrain, skullstrip, [("out_file", "in_file")]),
        (makemask, skullstrip, [("binary_file", "mask_file")]),
        (skullstrip, antswarp, [("out_file", "in_file")]),
        (antswarp, warpreport, [("brain_file", "in_file")]),
        (subjectsource, datasink, [("subject_id", "container")]),
        (antswarp, datasink, [("warp_file", "normalization.@warpfield"),
                              ("inv_warp_file",
                               "normalization.@inverse_warpfield"),
                              ("affine_file", "normalization.@affine"),
                              ("brain_file", "normalization.@brain")]),
        (warpreport, datasink, [("out_file", "normalization.@report")]),
    ])

    return normalize
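# Hedged usage sketch for create_ants_workflow above; the subject IDs, working
# directory and plugin settings are placeholders, and SUBJECTS_DIR is assumed
# to point at a FreeSurfer subjects directory processed with recon-all.
normalize_wf = create_ants_workflow(subjects=["subj01", "subj02"])
normalize_wf.base_dir = "/scratch/antswarp_work"
normalize_wf.run(plugin="MultiProc", plugin_args={"n_procs": 2})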
Example #19
def define_preproc_workflow(info, subjects, sessions, qc=True):

    # --- Workflow parameterization and data input

    scan_info = info.scan_info
    experiment = info.experiment_name

    iterables = generate_iterables(scan_info, experiment, subjects, sessions)
    subject_iterables, session_iterables, run_iterables = iterables

    subject_iterables = subjects

    subject_source = Node(IdentityInterface(["subject"]),
                          name="subject_source",
                          iterables=("subject", subject_iterables))

    session_source = Node(IdentityInterface(["subject", "session"]),
                          name="session_source",
                          itersource=("subject_source", "subject"),
                          iterables=("session", session_iterables))

    run_source = Node(IdentityInterface(["subject", "session", "run"]),
                      name="run_source",
                      itersource=("session_source", "session"),
                      iterables=("run", run_iterables))

    session_input = Node(SessionInput(data_dir=info.data_dir,
                                      proc_dir=info.proc_dir,
                                      fm_template=info.fm_template,
                                      phase_encoding=info.phase_encoding),
                         "session_input")

    run_input = Node(RunInput(experiment=experiment,
                              data_dir=info.data_dir,
                              proc_dir=info.proc_dir,
                              sb_template=info.sb_template,
                              ts_template=info.ts_template,
                              crop_frames=info.crop_frames),
                     name="run_input")

    # --- Warpfield estimation using topup

    # Distortion warpfield estimation
    #  TODO figure out how to parameterize for testing
    # topup_config = op.realpath(op.join(__file__, "../../../topup_fast.cnf"))
    topup_config = "b02b0.cnf"
    estimate_distortions = Node(fsl.TOPUP(config=topup_config),
                                "estimate_distortions")

    # Post-process the TOPUP outputs
    finalize_unwarping = Node(FinalizeUnwarping(), "finalize_unwarping")

    # --- Registration of SE-EPI (without distortions) to Freesurfer anatomy

    fm2anat = Node(fs.BBRegister(init="fsl",
                                 contrast_type="t2",
                                 registered_file=True,
                                 out_fsl_file="sess2anat.mat",
                                 out_reg_file="sess2anat.dat"),
                   "fm2anat")

    fm2anat_qc = Node(AnatRegReport(data_dir=info.data_dir), "fm2anat_qc")

    # --- Registration of SBRef to SE-EPI (with distortions)

    sb2fm = Node(fsl.FLIRT(dof=6, interp="spline"), "sb2fm")

    sb2fm_qc = Node(CoregGIF(out_file="coreg.gif"), "sb2fm_qc")

    # --- Motion correction of time series to SBRef (with distortions)

    ts2sb = Node(fsl.MCFLIRT(save_mats=True, save_plots=True),
                 "ts2sb")

    ts2sb_qc = Node(RealignmentReport(), "ts2sb_qc")

    # --- Combined motion correction, unwarping, and template registration

    # Combine pre-and post-warp linear transforms
    combine_premats = MapNode(fsl.ConvertXFM(concat_xfm=True),
                              "in_file", "combine_premats")

    combine_postmats = Node(fsl.ConvertXFM(concat_xfm=True),
                            "combine_postmats")

    # Transform Jacobian images into the template space
    transform_jacobian = Node(fsl.ApplyWarp(relwarp=True),
                              "transform_jacobian")

    # Apply rigid transforms and nonlinear warpfield to time series frames
    restore_timeseries = MapNode(fsl.ApplyWarp(interp="spline", relwarp=True),
                                 ["in_file", "premat"],
                                 "restore_timeseries")

    # Apply rigid transforms and nonlinear warpfield to template frames
    restore_template = MapNode(fsl.ApplyWarp(interp="spline", relwarp=True),
                               ["in_file", "premat", "field_file"],
                               "restore_template")

    # Perform final preprocessing operations on timeseries
    finalize_timeseries = Node(FinalizeTimeseries(experiment=experiment),
                               "finalize_timeseries")

    # Perform final preprocessing operations on template
    finalize_template = JoinNode(FinalizeTemplate(experiment=experiment),
                                 name="finalize_template",
                                 joinsource="run_source",
                                 joinfield=["mean_files", "tsnr_files",
                                            "mask_files", "noise_files"])

    # --- Workflow output

    save_info = Node(SaveInfo(info_dict=info.trait_get()), "save_info")

    template_output = Node(DataSink(base_directory=info.proc_dir,
                                    parameterization=False),
                           "template_output")

    timeseries_output = Node(DataSink(base_directory=info.proc_dir,
                                      parameterization=False),
                             "timeseries_output")

    # === Assemble pipeline

    cache_base = op.join(info.cache_dir, info.experiment_name)
    workflow = Workflow(name="preproc", base_dir=cache_base)

    # Connect processing nodes

    processing_edges = [

        (subject_source, session_source,
            [("subject", "subject")]),
        (subject_source, run_source,
            [("subject", "subject")]),
        (session_source, run_source,
            [("session", "session")]),
        (session_source, session_input,
            [("session", "session")]),
        (run_source, run_input,
            [("run", "run")]),

        # Phase-encode distortion estimation

        (session_input, estimate_distortions,
            [("fm_file", "in_file"),
             ("phase_encoding", "encoding_direction"),
             ("readout_times", "readout_times")]),
        (session_input, finalize_unwarping,
            [("fm_file", "raw_file"),
             ("phase_encoding", "phase_encoding")]),
        (estimate_distortions, finalize_unwarping,
            [("out_corrected", "corrected_file"),
             ("out_warps", "warp_files"),
             ("out_jacs", "jacobian_files")]),

        # Registration of corrected SE-EPI to anatomy

        (session_input, fm2anat,
            [("subject", "subject_id")]),
        (finalize_unwarping, fm2anat,
            [("corrected_file", "source_file")]),

        # Registration of each frame to SBRef image

        (run_input, ts2sb,
            [("ts_file", "in_file"),
             ("sb_file", "ref_file")]),
        (ts2sb, finalize_timeseries,
            [("par_file", "mc_file")]),

        # Registration of SBRef volume to SE-EPI fieldmap

        (run_input, sb2fm,
            [("sb_file", "in_file")]),
        (finalize_unwarping, sb2fm,
            [("raw_file", "reference"),
             ("mask_file", "ref_weight")]),

        # Single-interpolation spatial realignment and unwarping

        (ts2sb, combine_premats,
            [("mat_file", "in_file")]),
        (sb2fm, combine_premats,
            [("out_matrix_file", "in_file2")]),
        (fm2anat, combine_postmats,
            [("out_fsl_file", "in_file")]),
        (session_input, combine_postmats,
            [("reg_file", "in_file2")]),

        (run_input, transform_jacobian,
            [("anat_file", "ref_file")]),
        (finalize_unwarping, transform_jacobian,
            [("jacobian_file", "in_file")]),
        (combine_postmats, transform_jacobian,
            [("out_file", "premat")]),

        (run_input, restore_timeseries,
            [("ts_frames", "in_file")]),
        (run_input, restore_timeseries,
            [("anat_file", "ref_file")]),
        (combine_premats, restore_timeseries,
            [("out_file", "premat")]),
        (finalize_unwarping, restore_timeseries,
            [("warp_file", "field_file")]),
        (combine_postmats, restore_timeseries,
            [("out_file", "postmat")]),
        (run_input, finalize_timeseries,
            [("run_tuple", "run_tuple"),
             ("anat_file", "anat_file"),
             ("seg_file", "seg_file"),
             ("mask_file", "mask_file")]),
        (transform_jacobian, finalize_timeseries,
            [("out_file", "jacobian_file")]),
        (restore_timeseries, finalize_timeseries,
            [("out_file", "in_files")]),

        (session_input, restore_template,
            [("fm_frames", "in_file"),
             ("anat_file", "ref_file")]),
        (estimate_distortions, restore_template,
            [("out_mats", "premat"),
             ("out_warps", "field_file")]),
        (combine_postmats, restore_template,
            [("out_file", "postmat")]),
        (session_input, finalize_template,
            [("session_tuple", "session_tuple"),
             ("seg_file", "seg_file"),
             ("anat_file", "anat_file")]),
        (transform_jacobian, finalize_template,
            [("out_file", "jacobian_file")]),
        (restore_template, finalize_template,
            [("out_file", "in_files")]),

        (finalize_timeseries, finalize_template,
            [("mean_file", "mean_files"),
             ("tsnr_file", "tsnr_files"),
             ("mask_file", "mask_files"),
             ("noise_file", "noise_files")]),

        # --- Persistent data storage

        # Outputs associated with each scanner run

        (finalize_timeseries, timeseries_output,
            [("output_path", "container"),
             ("out_file", "@func"),
             ("mean_file", "@mean"),
             ("mask_file", "@mask"),
             ("tsnr_file", "@tsnr"),
             ("noise_file", "@noise"),
             ("mc_file", "@mc")]),

        # Outputs associated with the session template

        (finalize_template, template_output,
            [("output_path", "container"),
             ("out_file", "@func"),
             ("mean_file", "@mean"),
             ("tsnr_file", "@tsnr"),
             ("mask_file", "@mask"),
             ("noise_file", "@noise")]),

    ]
    workflow.connect(processing_edges)

    # Optionally connect QC nodes

    qc_edges = [

        # Registration of each frame to SBRef image

        (run_input, ts2sb_qc,
            [("sb_file", "target_file")]),
        (ts2sb, ts2sb_qc,
            [("par_file", "realign_params")]),

        # Registration of corrected SE-EPI to anatomy

        (session_input, fm2anat_qc,
            [("subject", "subject_id")]),
        (fm2anat, fm2anat_qc,
            [("registered_file", "in_file"),
             ("min_cost_file", "cost_file")]),

        # Registration of SBRef volume to SE-EPI fieldmap

        (sb2fm, sb2fm_qc,
            [("out_file", "in_file")]),
        (finalize_unwarping, sb2fm_qc,
            [("raw_file", "ref_file")]),

        # Outputs associated with each scanner run

        (run_source, save_info,
            [("run", "parameterization")]),
        (save_info, timeseries_output,
            [("info_file", "qc.@info_json")]),

        (run_input, timeseries_output,
            [("ts_plot", "qc.@raw_gif")]),
        (sb2fm_qc, timeseries_output,
            [("out_file", "qc.@sb2fm_gif")]),
        (ts2sb_qc, timeseries_output,
            [("params_plot", "qc.@params_plot"),
             ("target_plot", "qc.@target_plot")]),
        (finalize_timeseries, timeseries_output,
            [("out_gif", "qc.@ts_gif"),
             ("out_png", "qc.@ts_png"),
             ("mask_plot", "qc.@mask_plot"),
             ("mean_plot", "qc.@ts_mean_plot"),
             ("tsnr_plot", "qc.@ts_tsnr_plot"),
             ("noise_plot", "qc.@noise_plot")]),

        # Outputs associated with the session template

        (finalize_unwarping, template_output,
            [("warp_plot", "qc.@warp_png"),
             ("unwarp_gif", "qc.@unwarp_gif")]),
        (fm2anat_qc, template_output,
            [("out_file", "qc.@reg_png")]),
        (finalize_template, template_output,
            [("out_plot", "qc.@func_png"),
             ("mean_plot", "qc.@mean"),
             ("tsnr_plot", "qc.@tsnr"),
             ("mask_plot", "qc.@mask"),
             ("noise_plot", "qc.@noise")]),

    ]

    if qc:
        workflow.connect(qc_edges)

    return workflow
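# Hedged usage sketch for define_preproc_workflow above; `info` is the
# lyman-style project/experiment object the function expects, and the subject
# and session labels are placeholders.
preproc_wf = define_preproc_workflow(info, subjects=["subj01"],
                                     sessions=["sess01"], qc=True)
preproc_wf.write_graph(graph2use="colored")   # optional sanity check of edges
preproc_wf.run(plugin="MultiProc", plugin_args={"n_procs": 8})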
Example #20
    # SPM Normalise (Estimate & Reslice)
    norm12 = Node(interface=spm.Normalize12(), name="normalize")
    #spm.Normalize12().input_spec.gm_output_type = [True, False, False] # Should be modulated normalised

    # Smooth
    smooth = Node(interface=spm.Smooth(), name="smooth")
    fwhmlist = [8]
    smooth.iterables = (
        'fwhm', fwhmlist
    )  # Iterables seem to follow the convention of variable.input[key] = value of the iterable.

    # ## Set up the file management through ds to be able to wipe out unnecessary files?
    #  - There are all sorts of files labelled under the type of processing that produced them in each subject's folder, so I am unclear why this is a useful management step. It simply adds a "ds" folder next to the processing folders.

    # Collect key output files in a useful place
    ds = Node(interface=DataSink(), name="ds")
    ds.inputs.base_directory = op.abspath(output_dir)
    ds.inputs.substitutions = [('_subj_id', ''), ('_task', ''), ('_t1_', '')]

    # ## Connect the nodes.
    # #### Tip 4: Think of the connections like the "dependency" fields in SPM's batch editor.

    # TODO Add first-level estimations

    preproc_wf.connect([
        (infosource, sf, [('subj_id', 'subj_id'), ('task', 'task'),
                          ('t1', 't1'), ('timept', 'timept')]),
        (infosource, ds, [('subj_id', 'container')]),
        (sf, realign, [('func', 'in_files')]),
        (realign, ds, [('realignment_parameters', 'motion.@param')]),
        (sf, coreg, [('struct', 'target')]),
Example #21
    hncma_atlas = os.path.join(subject_directory, "WarpedAtlas2Subject",
                               "hncma_atlas.nii.gz")
    direction_files = dict()
    for name in ["rho", "phi", "theta"]:
        direction_files[name] = os.path.join(subject_directory,
                                             "WarpedAtlas2Subject",
                                             "{0}.nii.gz".format(name))

    lh_white_surface_file = os.path.join(subject_directory, "FreeSurfer",
                                         "surf", "lh.white")
    rh_white_surface_file = os.path.join(subject_directory, "FreeSurfer",
                                         "surf", "rh.white")

    logb_wf = create_logismosb_machine_learning_workflow()
    wf = Workflow("MachineLearning_Baseline_{0}".format(session_id))
    datasink = Node(DataSink(), name="DataSink")
    datasink.inputs.base_directory = os.path.join(results_dir, session_id)
    for hemisphere in ("lh", "rh"):
        for matter in ("gm", "wm"):
            wf.connect(
                logb_wf,
                "output_spec.{0}_{1}surface_file".format(hemisphere, matter),
                datasink,
                "EdgePrediction.@{0}_{1}".format(hemisphere, matter),
            )

    logb_wf.inputs.input_spec.t1_file = t1_file
    logb_wf.inputs.input_spec.orig_t1 = t1_file
    logb_wf.inputs.input_spec.t2_file = t2_file
    logb_wf.inputs.input_spec.posteriors = posterior_files
    logb_wf.inputs.input_spec.hncma_file = hncma_atlas
Example #22
datasource.inputs.base_directory = data_dir
datasource.inputs.template = '*'
datasource.inputs.field_template = {'t1c': '%s*.nii.gz'}
datasource.inputs.template_args = {'t1c': [['subject_id']]}
datasource.inputs.sort_filelist = True
datasource.inputs.subject_id = subjects

seg = Node(ants.BrainExtraction(), name='seg', synchronize=True)
seg.inputs.dimension = 3
seg.inputs.keep_temporary_files = 1
seg.inputs.brain_template = op.join(template_dir, 'T_template0.nii.gz')
seg.inputs.brain_probability_mask = op.join(
    template_dir, 'T_template0_BrainCerebellumProbabilityMask.nii.gz')

# Node to save output files. This does not work. Why?
sinker = Node(DataSink(), name='sinker')
# Add container.
sinker.inputs.base_directory = op.abspath('antsBrainExtraction_output')

# Workflow.
wf = Workflow(name='antsBrainExtraction', base_dir='/om/scratch/Wed/jakubk')
wf.connect(datasource, 't1c', seg, 'anatomical_image')
wf.connect(datasource, 'subject_id', seg, 'out_prefix')
wf.connect(seg, 'BrainExtractionBrain', sinker, 'brain')
wf.connect(seg, 'BrainExtractionMask', sinker, 'brain_masks')
wf.connect(seg, 'BrainExtractionSegmentation', sinker, 'seg_full')
wf.connect(seg, 'BrainExtractionCSF', sinker, 'csf')
wf.connect(seg, 'BrainExtractionGM', sinker, 'gm')
wf.connect(seg, 'BrainExtractionWM', sinker, 'wm')
wf.run(plugin='SLURM', plugin_args={'sbatch_args': '--mem=50GB'})
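# The comment above asks why the sink "does not work".  Without the traceback
# this is only a guess, but a common cause with iterable subjects is that every
# iteration writes into the same sink sub-folders; routing subject_id into the
# DataSink "container" input gives each subject its own output directory.
# Minimal standalone sketch (names and paths are hypothetical):
from nipype import Node, Workflow
from nipype.interfaces.io import DataSink
from nipype.interfaces.utility import IdentityInterface

demo_subj = Node(IdentityInterface(fields=['subject_id']), name='demo_subj')
demo_subj.iterables = ('subject_id', ['s01', 's02'])
demo_sink = Node(DataSink(base_directory='/tmp/out'), name='demo_sink')

demo_wf = Workflow(name='sink_demo', base_dir='/tmp/work')
demo_wf.connect(demo_subj, 'subject_id', demo_sink, 'container')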
Example #23
def create_preproc_workflow(session):
    """
    Defines simple functional preprocessing workflow, including motion
    correction, registration to distortion scans, and unwarping. Assumes
    recon-all has been performed on T1, and computes but does not apply
    registration to anatomy.
    """

    #---Create workflow---
    wf = Workflow(name='workflow', base_dir=session['working_dir'])

    #---EPI Realignment---

    # Realign every TR in each functional run to the sbref image using mcflirt
    realign = MapNode(fsl.MCFLIRT(ref_file=session['sbref'],
                                  save_mats=True,
                                  save_plots=True),
                      iterfield=['in_file'],
                      name='realign')
    realign.inputs.in_file = session['epis']
    wf.add_nodes([realign])

    #---Registration to distortion scan---

    # Register the sbref scan to the distortion scan with the same PE using flirt
    reg2dist = Node(fsl.FLIRT(in_file=session['sbref'],
                              reference=session['distort_PE'],
                              out_file='sbref_reg.nii.gz',
                              out_matrix_file='sbref2dist.mat',
                              dof=6),
                    name='reg2distort')
    wf.add_nodes([reg2dist])

    #---Distortion correction---

    # Merge the two distortion scans for unwarping
    distort_scans = [session['distort_PE'], session['distort_revPE']]
    merge_dist = Node(fsl.Merge(in_files=distort_scans,
                                dimension='t',
                                merged_file='distortion_merged.nii.gz'),
                      name='merge_distort')
    wf.add_nodes([merge_dist])

    # Run topup to estimate warpfield and create unwarped distortion scans
    if '-' not in session['PE_dim']:
        PEs = np.repeat([session['PE_dim'], session['PE_dim'] + '-'], 3)
    else:
        PEs = np.repeat(
            [session['PE_dim'], session['PE_dim'].replace('-', '')], 3)
    unwarp_dist = Node(fsl.TOPUP(encoding_direction=list(PEs),
                                 readout_times=[1, 1, 1, 1, 1, 1],
                                 config='b02b0.cnf',
                                 fwhm=0),
                       name='unwarp_distort')
    wf.connect(merge_dist, 'merged_file', unwarp_dist, 'in_file')

    # Unwarp sbref image in case it's useful
    unwarp_sbref = Node(fsl.ApplyTOPUP(in_index=[1], method='jac'),
                        name='unwarp_sbref')
    wf.connect([(reg2dist, unwarp_sbref, [('out_file', 'in_files')]),
                (unwarp_dist, unwarp_sbref,
                 [('out_enc_file', 'encoding_file'),
                  ('out_fieldcoef', 'in_topup_fieldcoef'),
                  ('out_movpar', 'in_topup_movpar')])])

    #---Registration to anatomy---

    # Create mean unwarped distortion scan
    mean_unwarped_dist = Node(fsl.MeanImage(dimension='T'),
                              name='mean_unwarped_distort')
    wf.connect(unwarp_dist, 'out_corrected', mean_unwarped_dist, 'in_file')

    # Register mean unwarped distortion scan to anatomy using bbregister
    reg2anat = Node(fs.BBRegister(
        subject_id=session['Freesurfer_subject_name'],
        contrast_type='t2',
        init='fsl',
        out_reg_file='distort2anat_tkreg.dat',
        out_fsl_file='distort2anat_flirt.mat'),
                    name='reg2anat')
    wf.connect(mean_unwarped_dist, 'out_file', reg2anat, 'source_file')

    #---Combine and apply transforms to EPIs---

    # Split EPI runs into 3D files
    split_epis = MapNode(fsl.Split(dimension='t'),
                         iterfield=['in_file'],
                         name='split_epis')
    split_epis.inputs.in_file = session['epis']
    wf.add_nodes([split_epis])

    # Combine the rigid transforms to be applied to each EPI volume
    concat_rigids = MapNode(fsl.ConvertXFM(concat_xfm=True),
                            iterfield=['in_file'],
                            nested=True,
                            name='concat_rigids')
    wf.connect([(realign, concat_rigids, [('mat_file', 'in_file')]),
                (reg2dist, concat_rigids, [('out_matrix_file', 'in_file2')])])

    # Apply rigid transforms and warpfield to each EPI volume
    correct_epis = MapNode(fsl.ApplyWarp(interp='spline', relwarp=True),
                           iterfield=['in_file', 'ref_file', 'premat'],
                           nested=True,
                           name='correct_epis')

    get_warp = lambda warpfields: warpfields[0]
    wf.connect([(split_epis, correct_epis, [('out_files', 'in_file'),
                                            ('out_files', 'ref_file')]),
                (concat_rigids, correct_epis, [('out_file', 'premat')]),
                (unwarp_dist, correct_epis, [(('out_warps', get_warp),
                                              'field_file')])])

    # Merge processed files back into 4D nifti
    merge_epis = MapNode(fsl.Merge(dimension='t',
                                   merged_file='timeseries_corrected.nii.gz'),
                         iterfield='in_files',
                         name='merge_epis')
    wf.connect([(correct_epis, merge_epis, [('out_file', 'in_files')])])

    #---Copy important files to main directory---
    substitutions = [('_merge_epis%d/timeseries_corrected.nii.gz' % i, n)
                     for i, n in enumerate(session['out_names'])]
    ds = Node(DataSink(base_directory=os.path.abspath(session['out']),
                       substitutions=substitutions),
              name='outfiles')
    wf.connect(unwarp_dist, 'out_corrected', ds, '@unwarp_dist')
    wf.connect(mean_unwarped_dist, 'out_file', ds, '@mean_unwarped_dist')
    wf.connect(unwarp_sbref, 'out_corrected', ds, '@unwarp_sbref')
    wf.connect(reg2anat, 'out_reg_file', ds, '@reg2anat')
    wf.connect(merge_epis, 'merged_file', ds, '@merge_epis')

    return wf
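# Hedged usage sketch for create_preproc_workflow above; the session dict keys
# mirror the ones read inside the function, and every path or value here is a
# placeholder.
session = dict(
    working_dir='/scratch/preproc_work',
    sbref='/data/sub01/sbref.nii.gz',
    epis=['/data/sub01/run1_bold.nii.gz', '/data/sub01/run2_bold.nii.gz'],
    distort_PE='/data/sub01/distort_AP.nii.gz',
    distort_revPE='/data/sub01/distort_PA.nii.gz',
    PE_dim='y',
    Freesurfer_subject_name='sub01',
    out='/data/sub01/preproc',
    out_names=['run1_corrected.nii.gz', 'run2_corrected.nii.gz'],
)
preproc = create_preproc_workflow(session)
preproc.run(plugin='MultiProc', plugin_args={'n_procs': 4})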
Example #24
def main(arglist):
    """Main function for workflow setup and execution."""
    args = parse_args(arglist)

    # Get and process specific information
    project = lyman.gather_project_info()
    exp = lyman.gather_experiment_info(args.experiment, args.altmodel, args)

    if args.experiment is None:
        args.experiment = project["default_exp"]

    if args.altmodel:
        exp_name = "-".join([args.experiment, args.altmodel])
    else:
        exp_name = args.experiment

    # Make sure some paths are set properly
    os.environ["SUBJECTS_DIR"] = project["data_dir"]

    # Set roots of output storage
    anal_dir_base = op.join(project["analysis_dir"], exp_name)
    work_dir_base = op.join(project["working_dir"], exp_name)
    nipype.config.set("execution", "crashdump_dir", project["crash_dir"])

    # Subject source (no iterables here)
    subject_list = lyman.determine_subjects(args.subjects)
    subj_source = Node(IdentityInterface(fields=["subject_id"]),
                       name="subj_source")
    subj_source.inputs.subject_id = subject_list

    # Set up the regressors and contrasts
    regressors = dict(group_mean=[1] * len(subject_list))
    contrasts = [["group_mean", "T", ["group_mean"], [1]]]

    # Subject level contrast source
    contrast_source = Node(IdentityInterface(fields=["l1_contrast"]),
                           iterables=("l1_contrast", exp["contrast_names"]),
                           name="contrast_source")

    # Group workflow
    space = args.regspace
    wf_name = "_".join([space, args.output])
    if space == "mni":
        mfx, mfx_input, mfx_output = wf.create_volume_mixedfx_workflow(
            wf_name, subject_list, regressors, contrasts, exp)
    else:
        mfx, mfx_input, mfx_output = wf.create_surface_ols_workflow(
            wf_name, subject_list, exp)

    # Mixed effects inputs
    ffxspace = "mni" if space == "mni" else "epi"
    ffxsmooth = "unsmoothed" if args.unsmoothed else "smoothed"
    mfx_base = op.join("{subject_id}/ffx/%s/%s/{l1_contrast}" %
                       (ffxspace, ffxsmooth))
    templates = dict(copes=op.join(mfx_base, "cope1.nii.gz"))
    if space == "mni":
        templates.update(
            dict(varcopes=op.join(mfx_base, "varcope1.nii.gz"),
                 dofs=op.join(mfx_base, "tdof_t1.nii.gz")))
    else:
        templates.update(
            dict(reg_file=op.join(anal_dir_base, "{subject_id}/reg/epi/",
                                  ffxsmooth, "run_1/func2anat_tkreg.dat")))

    # Workflow source node
    mfx_source = MapNode(
        SelectFiles(templates,
                    base_directory=anal_dir_base,
                    sort_filelist=True), "subject_id", "mfx_source")

    # Workflow input connections
    mfx.connect([
        (contrast_source, mfx_source, [("l1_contrast", "l1_contrast")]),
        (contrast_source, mfx_input, [("l1_contrast", "l1_contrast")]),
        (subj_source, mfx_source, [("subject_id", "subject_id")]),
        (mfx_source, mfx_input, [("copes", "copes")])
    ])
    if space == "mni":
        mfx.connect([
            (mfx_source, mfx_input, [("varcopes", "varcopes"),
                                     ("dofs", "dofs")]),
        ])
    else:
        mfx.connect([(mfx_source, mfx_input, [("reg_file", "reg_file")]),
                     (subj_source, mfx_input, [("subject_id", "subject_id")])])

    # Mixed effects outputs
    mfx_sink = Node(DataSink(base_directory="/".join(
        [anal_dir_base, args.output, space]),
                             substitutions=[("/stats", "/"), ("/_hemi_", "/"),
                                            ("_glm_results", "")],
                             parameterization=True),
                    name="mfx_sink")

    mfx_outwrap = tools.OutputWrapper(mfx, subj_source, mfx_sink, mfx_output)
    mfx_outwrap.sink_outputs()
    mfx_outwrap.set_mapnode_substitutions(1)
    mfx_outwrap.add_regexp_substitutions([(r"_l1_contrast_[-\w]*/", "/"),
                                          (r"_mni_hemi_[lr]h", "")])
    mfx.connect(contrast_source, "l1_contrast", mfx_sink, "container")

    # Set a few last things
    mfx.base_dir = work_dir_base

    # Execute
    lyman.run_workflow(mfx, args=args)

    # Clean up
    if project["rm_working_dir"]:
        shutil.rmtree(project["working_dir"])
Example #25
def define_template_workflow(info, subjects, qc=True):

    # --- Workflow parameterization

    subject_source = Node(IdentityInterface(["subject"]),
                          name="subject_source",
                          iterables=("subject", subjects))

    # Data input
    template_input = Node(TemplateInput(data_dir=info.data_dir),
                          "template_input")

    # --- Definition of functional template space

    crop_image = Node(fs.ApplyMask(args="-bb 4"), "crop_image")

    zoom_image = Node(fs.MRIConvert(resample_type="cubic",
                                    out_type="niigz",
                                    vox_size=info.voxel_size,
                                    ),
                      "zoom_image")

    reorient_image = Node(fsl.Reorient2Std(out_file="anat.nii.gz"),
                          "reorient_image")

    generate_reg = Node(fs.Tkregister2(fsl_out="anat2func.mat",
                                       reg_file="anat2func.dat",
                                       reg_header=True),
                        "generate_reg")

    invert_reg = Node(fs.Tkregister2(reg_file="func2anat.dat",
                                     reg_header=True),
                      "invert_reg")

    # --- Identification of surface vertices

    hemi_source = Node(IdentityInterface(["hemi"]), "hemi_source",
                       iterables=("hemi", ["lh", "rh"]))

    tag_surf = Node(fs.Surface2VolTransform(surf_name="graymid",
                                            transformed_file="ribbon.nii.gz",
                                            vertexvol_file="vertices.nii.gz",
                                            mkmask=True),
                    "tag_surf")

    mask_cortex = Node(MaskWithLabel(fill_value=-1), "mask_cortex")

    combine_hemis = JoinNode(fsl.Merge(dimension="t",
                                       merged_file="surf.nii.gz"),
                             name="combine_hemis",
                             joinsource="hemi_source",
                             joinfield="in_files")

    make_ribbon = Node(MakeRibbon(), "make_ribbon")

    # --- Segmentation of anatomical tissue in functional space

    transform_wmparc = Node(fs.ApplyVolTransform(inverse=True,
                                                 interp="nearest",
                                                 args="--keep-precision"),
                            "transform_wmparc")

    anat_segment = Node(AnatomicalSegmentation(), "anat_segment")

    # --- Template QC

    template_qc = Node(TemplateReport(), "template_qc")

    # --- Workflow output

    save_info = Node(SaveInfo(info_dict=info.trait_get()), "save_info")

    template_output = Node(DataSink(base_directory=info.proc_dir,
                                    parameterization=False),
                           "template_output")

    # === Assemble pipeline

    workflow = Workflow(name="template", base_dir=info.cache_dir)

    processing_edges = [

        (subject_source, template_input,
            [("subject", "subject")]),
        (template_input, crop_image,
            [("norm_file", "in_file"),
             ("wmparc_file", "mask_file")]),
        (crop_image, zoom_image,
            [("out_file", "in_file")]),
        (zoom_image, reorient_image,
            [("out_file", "in_file")]),

        (subject_source, generate_reg,
            [("subject", "subject_id")]),
        (template_input, generate_reg,
            [("norm_file", "moving_image")]),
        (reorient_image, generate_reg,
            [("out_file", "target_image")]),

        (subject_source, invert_reg,
            [("subject", "subject_id")]),
        (template_input, invert_reg,
            [("norm_file", "target_image")]),
        (reorient_image, invert_reg,
            [("out_file", "moving_image")]),

        (hemi_source, tag_surf,
            [("hemi", "hemi")]),
        (invert_reg, tag_surf,
            [("reg_file", "reg_file")]),
        (reorient_image, tag_surf,
            [("out_file", "template_file")]),
        (template_input, mask_cortex,
            [("label_files", "label_files")]),
        (hemi_source, mask_cortex,
            [("hemi", "hemi")]),
        (tag_surf, mask_cortex,
            [("vertexvol_file", "in_file")]),
        (mask_cortex, combine_hemis,
            [("out_file", "in_files")]),
        (combine_hemis, make_ribbon,
            [("merged_file", "in_file")]),

        (reorient_image, transform_wmparc,
            [("out_file", "source_file")]),
        (template_input, transform_wmparc,
            [("wmparc_file", "target_file")]),
        (invert_reg, transform_wmparc,
            [("reg_file", "reg_file")]),
        (reorient_image, anat_segment,
            [("out_file", "anat_file")]),
        (transform_wmparc, anat_segment,
            [("transformed_file", "wmparc_file")]),
        (combine_hemis, anat_segment,
            [("merged_file", "surf_file")]),

        (template_input, template_output,
            [("output_path", "container")]),
        (reorient_image, template_output,
            [("out_file", "@anat")]),
        (generate_reg, template_output,
            [("fsl_file", "@anat2func")]),
        (anat_segment, template_output,
            [("seg_file", "@seg"),
             ("lut_file", "@lut"),
             ("edge_file", "@edge"),
             ("mask_file", "@mask")]),
        (combine_hemis, template_output,
            [("merged_file", "@surf")]),
        (make_ribbon, template_output,
            [("out_file", "@ribon")]),

    ]
    workflow.connect(processing_edges)

    # Optionally connect QC nodes

    qc_edges = [

        (reorient_image, template_qc,
            [("out_file", "anat_file")]),
        (combine_hemis, template_qc,
            [("merged_file", "surf_file")]),
        (anat_segment, template_qc,
            [("lut_file", "lut_file"),
             ("seg_file", "seg_file"),
             ("edge_file", "edge_file"),
             ("mask_file", "mask_file")]),

        (subject_source, save_info,
            [("subject", "parameterization")]),
        (save_info, template_output,
            [("info_file", "qc.@info_json")]),

        (template_qc, template_output,
            [("seg_plot", "qc.@seg_plot"),
             ("mask_plot", "qc.@mask_plot"),
             ("edge_plot", "qc.@edge_plot"),
             ("surf_plot", "qc.@surf_plot"),
             ("anat_plot", "qc.@anat_plot")]),

    ]
    if qc:
        workflow.connect(qc_edges)

    return workflow
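A minimal sketch of how the returned template workflow might be run (the `info` and `subjects` arguments are assumed to come from the project's own configuration helpers; the plugin settings are illustrative, not taken from the original source):

wf = define_template_workflow(info, subjects, qc=True)
wf.write_graph(graph2use="colored")  # optional: render the node graph for inspection
wf.run(plugin="MultiProc", plugin_args={"n_procs": 4})  # execute independent nodes in parallel
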
Example #26
def main(arglist):
    """Main function for workflow setup and execution."""
    args = parse_args(arglist)

    # Get and process specific information
    project = lyman.gather_project_info()
    exp = lyman.gather_experiment_info(args.experiment, args.altmodel)

    if args.experiment is None:
        args.experiment = project["default_exp"]

    if args.altmodel:
        exp_name = "-".join([args.experiment, args.altmodel])
    else:
        exp_name = args.experiment

    # Make sure some paths are set properly
    os.environ["SUBJECTS_DIR"] = project["data_dir"]

    # Set roots of output storage
    anal_dir_base = op.join(project["analysis_dir"], exp_name)
    work_dir_base = op.join(project["working_dir"], exp_name)
    nipype.config.set("execution", "crashdump_dir", project["crash_dir"])

    ### Set up group info
    ## Regular design
    group_info = pd.read_csv(group_filepath)

    # Subject source (no iterables here)
    subject_list = lyman.determine_subjects(args.subjects)
    # Additional code (deletion caught by Dan Dillon)
    subj_source = Node(IdentityInterface(fields=["subject_id"]),
                       name="subj_source")
    subj_source.inputs.subject_id = subject_list

    print(group_info)
    print(subject_list)

    groups = [
        group_info[group_info.subid == x].reset_index().at[0, 'group']
        for x in subject_list
    ]
    group_vector = [1 if sub == "group1" else 2
                    for sub in groups]  # 1 for group1, 2 for group2

    # Set up the regressors and contrasts
    regressors = dict(group1_mean=[int(sub == 'group1') for sub in groups],
                      group2_mean=[int(sub == 'group2') for sub in groups])
    print(regressors)

    # DECIDE WHICH CONTRAST YOU WANT HERE:
    contrasts = [[
        contrast_name, "T", ["group1_mean", "group2_mean"], contrast_vals
    ]]

    print('Using this contrast:')
    print(contrast_name)
    print(contrast_vals)

    # Subject level contrast source
    contrast_source = Node(IdentityInterface(fields=["l1_contrast"]),
                           iterables=("l1_contrast", exp["contrast_names"]),
                           name="contrast_source")

    # Group workflow
    space = args.regspace
    wf_name = "_".join([space, args.output])
    if space == "mni":
        mfx, mfx_input, mfx_output = wf.create_volume_mixedfx_workflow_groups(
            wf_name, subject_list, regressors, contrasts, exp, group_vector)
    else:
        mfx, mfx_input, mfx_output = wf.create_surface_ols_workflow(
            wf_name, subject_list, exp)

    # Mixed effects inputs
    ffxspace = "mni" if space == "mni" else "epi"
    ffxsmooth = "unsmoothed" if args.unsmoothed else "smoothed"
    mfx_base = op.join("{subject_id}/ffx/%s/%s/{l1_contrast}" %
                       (ffxspace, ffxsmooth))
    templates = dict(copes=op.join(mfx_base, "cope1.nii.gz"))
    if space == "mni":
        templates.update(
            dict(varcopes=op.join(mfx_base, "varcope1.nii.gz"),
                 dofs=op.join(mfx_base, "tdof_t1.nii.gz")))
    else:
        templates.update(
            dict(reg_file=op.join(anal_dir_base, "{subject_id}/preproc/run_1",
                                  "func2anat_tkreg.dat")))

    # Workflow source node
    mfx_source = MapNode(
        SelectFiles(templates,
                    base_directory=anal_dir_base,
                    sort_filelist=True), "subject_id", "mfx_source")

    # Workflow input connections
    mfx.connect([
        (contrast_source, mfx_source, [("l1_contrast", "l1_contrast")]),
        (contrast_source, mfx_input, [("l1_contrast", "l1_contrast")]),
        (subj_source, mfx_source, [("subject_id", "subject_id")]),
        (mfx_source, mfx_input, [("copes", "copes")])
    ])
    if space == "mni":
        mfx.connect([
            (mfx_source, mfx_input, [("varcopes", "varcopes"),
                                     ("dofs", "dofs")]),
        ])
    else:
        mfx.connect([(mfx_source, mfx_input, [("reg_file", "reg_file")]),
                     (subj_source, mfx_input, [("subject_id", "subject_id")])])

    # Mixed effects outputs
    mfx_sink = Node(DataSink(base_directory="/".join(
        [anal_dir_base, args.output, space]),
                             substitutions=[("/stats", "/"), ("/_hemi_", "/"),
                                            ("_glm_results", "")],
                             parameterization=True),
                    name="mfx_sink")

    mfx_outwrap = tools.OutputWrapper(mfx, subj_source, mfx_sink, mfx_output)
    mfx_outwrap.sink_outputs()
    mfx_outwrap.set_mapnode_substitutions(1)
    mfx_outwrap.add_regexp_substitutions([(r"_l1_contrast_[-\w]*/", "/"),
                                          (r"_mni_hemi_[lr]h", "")])
    mfx.connect(contrast_source, "l1_contrast", mfx_sink, "container")

    # Set a few last things
    mfx.base_dir = work_dir_base

    # Execute
    lyman.run_workflow(mfx, args=args)

    # Clean up
    if project["rm_working_dir"]:
        shutil.rmtree(project["working_dir"])
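
A script built around this `main` function would typically be invoked from the command line; a hedged sketch of the entry point (argument handling mirrors the `parse_args(arglist)` call above, and `sys` would need to be imported):

if __name__ == "__main__":
    main(sys.argv[1:])
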
def group_multregress_openfmri(dataset_dir,
                               model_id=None,
                               task_id=None,
                               l1output_dir=None,
                               out_dir=None,
                               no_reversal=False,
                               plugin=None,
                               plugin_args=None,
                               flamemodel='flame1',
                               nonparametric=False,
                               use_spm=False):

    meta_workflow = Workflow(name='mult_regress')
    meta_workflow.base_dir = work_dir
    for task in task_id:
        task_name = get_taskname(dataset_dir, task)
        cope_ids = l1_contrasts_num(model_id, task_name, dataset_dir)
        regressors_needed, contrasts, groups, subj_list = get_sub_vars(
            dataset_dir, task_name, model_id)
        for idx, contrast in enumerate(contrasts):
            wk = Workflow(name='model_%03d_task_%03d_contrast_%s' %
                          (model_id, task, contrast[0][0]))

            info = Node(util.IdentityInterface(
                fields=['model_id', 'task_id', 'dataset_dir', 'subj_list']),
                        name='infosource')
            info.inputs.model_id = model_id
            info.inputs.task_id = task
            info.inputs.dataset_dir = dataset_dir

            dg = Node(DataGrabber(infields=['model_id', 'task_id', 'cope_id'],
                                  outfields=['copes', 'varcopes']),
                      name='grabber')
            dg.inputs.template = os.path.join(
                l1output_dir,
                'model%03d/task%03d/%s/%scopes/%smni/%scope%02d.nii%s')
            if use_spm:
                dg.inputs.template_args['copes'] = [[
                    'model_id', 'task_id', subj_list, '', 'spm/', '',
                    'cope_id', ''
                ]]
                dg.inputs.template_args['varcopes'] = [[
                    'model_id', 'task_id', subj_list, 'var', 'spm/', 'var',
                    'cope_id', '.gz'
                ]]
            else:
                dg.inputs.template_args['copes'] = [[
                    'model_id', 'task_id', subj_list, '', '', '', 'cope_id',
                    '.gz'
                ]]
                dg.inputs.template_args['varcopes'] = [[
                    'model_id', 'task_id', subj_list, 'var', '', 'var',
                    'cope_id', '.gz'
                ]]
            dg.iterables = ('cope_id', cope_ids)
            dg.inputs.sort_filelist = False

            wk.connect(info, 'model_id', dg, 'model_id')
            wk.connect(info, 'task_id', dg, 'task_id')

            model = Node(MultipleRegressDesign(), name='l2model')
            model.inputs.groups = groups
            model.inputs.contrasts = contrasts[idx]
            model.inputs.regressors = regressors_needed[idx]

            mergecopes = Node(Merge(dimension='t'), name='merge_copes')
            wk.connect(dg, 'copes', mergecopes, 'in_files')

            if flamemodel != 'ols':
                mergevarcopes = Node(Merge(dimension='t'),
                                     name='merge_varcopes')
                wk.connect(dg, 'varcopes', mergevarcopes, 'in_files')

            mask_file = fsl.Info.standard_image(
                'MNI152_T1_2mm_brain_mask.nii.gz')
            flame = Node(FLAMEO(), name='flameo')
            flame.inputs.mask_file = mask_file
            flame.inputs.run_mode = flamemodel
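            # run_mode 'flame1' selects FLAME stage-1 mixed effects; with 'ols',
            # the varcope merge below is skipped and FLAMEO runs ordinary least squares.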
            #flame.inputs.infer_outliers = True

            wk.connect(model, 'design_mat', flame, 'design_file')
            wk.connect(model, 'design_con', flame, 't_con_file')
            wk.connect(mergecopes, 'merged_file', flame, 'cope_file')
            if flamemodel != 'ols':
                wk.connect(mergevarcopes, 'merged_file', flame,
                           'var_cope_file')
            wk.connect(model, 'design_grp', flame, 'cov_split_file')

            if nonparametric:
                palm = Node(Function(input_names=[
                    'cope_file', 'design_file', 'contrast_file', 'group_file',
                    'mask_file', 'cluster_threshold'
                ],
                                     output_names=['palm_outputs'],
                                     function=run_palm),
                            name='palm')
                palm.inputs.cluster_threshold = 3.09
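                # 3.09 is approximately the one-tailed z equivalent of p < .001,
                # a common cluster-forming threshold for permutation inference.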
                palm.inputs.mask_file = mask_file
                palm.plugin_args = {
                    'sbatch_args': '-p om_all_nodes -N1 -c2 --mem=10G',
                    'overwrite': True
                }
                wk.connect(model, 'design_mat', palm, 'design_file')
                wk.connect(model, 'design_con', palm, 'contrast_file')
                wk.connect(mergecopes, 'merged_file', palm, 'cope_file')
                wk.connect(model, 'design_grp', palm, 'group_file')

            smoothest = Node(SmoothEstimate(), name='smooth_estimate')
            wk.connect(flame, 'zstats', smoothest, 'zstat_file')
            smoothest.inputs.mask_file = mask_file

            cluster = Node(Cluster(), name='cluster')
            wk.connect(smoothest, 'dlh', cluster, 'dlh')
            wk.connect(smoothest, 'volume', cluster, 'volume')
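            # Conventional FSL cluster correction: 26-connectivity, forming
            # threshold z > 2.3, and a cluster-level p < .05.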
            cluster.inputs.connectivity = 26
            cluster.inputs.threshold = 2.3
            cluster.inputs.pthreshold = 0.05
            cluster.inputs.out_threshold_file = True
            cluster.inputs.out_index_file = True
            cluster.inputs.out_localmax_txt_file = True

            wk.connect(flame, 'zstats', cluster, 'in_file')

            ztopval = Node(ImageMaths(op_string='-ztop', suffix='_pval'),
                           name='z2pval')
            wk.connect(flame, 'zstats', ztopval, 'in_file')
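            # (the '-ztop' op above converts z-statistics to uncorrected p-value maps)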

            sinker = Node(DataSink(), name='sinker')
            sinker.inputs.base_directory = os.path.join(
                out_dir, 'task%03d' % task, contrast[0][0])
            sinker.inputs.substitutions = [('_cope_id', 'contrast'),
                                           ('_maths_', '_reversed_')]

            wk.connect(flame, 'zstats', sinker, 'stats')
            wk.connect(cluster, 'threshold_file', sinker, 'stats.@thr')
            wk.connect(cluster, 'index_file', sinker, 'stats.@index')
            wk.connect(cluster, 'localmax_txt_file', sinker, 'stats.@localmax')
            if nonparametric:
                wk.connect(palm, 'palm_outputs', sinker, 'stats.palm')

            if not no_reversal:
                zstats_reverse = Node(BinaryMaths(), name='zstats_reverse')
                zstats_reverse.inputs.operation = 'mul'
                zstats_reverse.inputs.operand_value = -1
                wk.connect(flame, 'zstats', zstats_reverse, 'in_file')

                cluster2 = cluster.clone(name='cluster2')
                wk.connect(smoothest, 'dlh', cluster2, 'dlh')
                wk.connect(smoothest, 'volume', cluster2, 'volume')
                wk.connect(zstats_reverse, 'out_file', cluster2, 'in_file')

                ztopval2 = ztopval.clone(name='ztopval2')
                wk.connect(zstats_reverse, 'out_file', ztopval2, 'in_file')

                wk.connect(zstats_reverse, 'out_file', sinker, 'stats.@neg')
                wk.connect(cluster2, 'threshold_file', sinker,
                           'stats.@neg_thr')
                wk.connect(cluster2, 'index_file', sinker, 'stats.@neg_index')
                wk.connect(cluster2, 'localmax_txt_file', sinker,
                           'stats.@neg_localmax')
            meta_workflow.add_nodes([wk])
    return meta_workflow
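
A minimal usage sketch for the group multiple-regression builder above; the dataset path, model/task IDs, and plugin settings are placeholders, and the module-level helpers it relies on (`work_dir`, `get_taskname`, `get_sub_vars`, `l1_contrasts_num`, `run_palm`) are assumed to be defined as in the original script:

meta_wf = group_multregress_openfmri(dataset_dir='/data/ds000114',
                                     model_id=1,
                                     task_id=[1],
                                     l1output_dir='/output/l1',
                                     out_dir='/output/group',
                                     flamemodel='flame1')
meta_wf.run(plugin='MultiProc', plugin_args={'n_procs': 8})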