Example #1
def test_serial_input():
    cwd = os.getcwd()
    wd = mkdtemp()
    os.chdir(wd)
    from nipype import MapNode, Function, Workflow
    def func1(in1):
        return in1
    n1 = MapNode(Function(input_names=['in1'],
                          output_names=['out'],
                          function=func1),
                 iterfield=['in1'],
                 name='n1')
    n1.inputs.in1 = [1, 2, 3]

    w1 = Workflow(name='test')
    w1.base_dir = wd
    w1.add_nodes([n1])
    # set local check
    w1.config['execution'] = {'stop_on_first_crash': 'true',
                              'local_hash_check': 'true',
                              'crashdump_dir': wd}

    # test output of num_subnodes method when serial is default (False)
    yield assert_equal, n1.num_subnodes(), len(n1.inputs.in1)

    # test running the workflow on default conditions
    error_raised = False
    try:
        w1.run(plugin='MultiProc')
    except Exception as e:
        pe.logger.info('Exception: %s' % str(e))
        error_raised = True
    yield assert_false, error_raised
Example #2
File: workflow.py, Project: gpiantoni/grvx
def workflow_ieeg(parameters):
    node_read = Node(function_ieeg_read, name='read')
    node_read.inputs.active_conditions = parameters['ieeg']['read']['active_conditions']
    node_read.inputs.baseline_conditions = parameters['ieeg']['read']['baseline_conditions']
    node_read.inputs.minimalduration = parameters['ieeg']['read']['minimalduration']

    node_preprocess = MapNode(function_ieeg_preprocess, name='preprocess', iterfield=['ieeg', ])
    node_preprocess.inputs.duration = parameters['ieeg']['preprocess']['duration']
    node_preprocess.inputs.reref = parameters['ieeg']['preprocess']['reref']
    node_preprocess.inputs.offset = parameters['ieeg']['preprocess']['offset']

    node_frequency = MapNode(function_ieeg_powerspectrum, name='powerspectrum', iterfield=['ieeg', ])
    node_frequency.inputs.method = parameters['ieeg']['powerspectrum']['method']
    node_frequency.inputs.taper = parameters['ieeg']['powerspectrum']['taper']
    node_frequency.inputs.halfbandwidth = parameters['ieeg']['powerspectrum']['halfbandwidth']
    node_frequency.inputs.duration = parameters['ieeg']['powerspectrum']['duration']

    node_compare = Node(function_ieeg_compare, name='ecog_compare')
    node_compare.iterables = (
        'frequency', parameters['ieeg']['ecog_compare']['frequency_bands'],
        )
    node_compare.inputs.baseline = parameters['ieeg']['ecog_compare']['baseline']
    node_compare.inputs.method = parameters['ieeg']['ecog_compare']['method']
    node_compare.inputs.measure = parameters['ieeg']['ecog_compare']['measure']

    node_compare_allfreq = Node(function_ieeg_compare_allfreq, name='ecog_compare_allfreq')

    w = Workflow('ieeg')

    w.connect(node_read, 'ieeg', node_preprocess, 'ieeg')
    w.connect(node_preprocess, 'ieeg', node_frequency, 'ieeg')
    w.connect(node_frequency, 'ieeg', node_compare, 'in_files')
    w.connect(node_frequency, 'ieeg', node_compare_allfreq, 'in_files')

    return w
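Note how node_compare uses `iterables` rather than a MapNode: the node is duplicated at run time, once per frequency band. A minimal, self-contained sketch of that mechanism (the function and band values are illustrative, not taken from the project above):
from nipype import Node, Function

def report(frequency):
    return frequency

n = Node(Function(input_names=['frequency'], output_names=['out'],
                  function=report), name='report')
# the node is expanded into one copy per listed value at execution time
n.iterables = ('frequency', [[4, 8], [8, 13], [13, 30]])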
Example #3
def create_templates_2func_workflow(threshold=0.5,
                                    name='templates_2func_workflow'):
    templates_2func_workflow = Workflow(name=name)

    # Input Node
    inputspec = Node(utility.IdentityInterface(fields=[
        'func_file',
        'premat',
        'warp',
        'templates',
    ]),
                     name='inputspec')

    # Get the overall EPI to MNI warp
    func_2mni_warp = Node(fsl.ConvertWarp(), name='func_2mni_warp')
    func_2mni_warp.inputs.reference = fsl.Info.standard_image(
        'MNI152_T1_2mm.nii.gz')

    # Calculate the inverse warp
    mni_2func_warp = Node(fsl.InvWarp(), name='mni_2func_warp')

    # Transform MNI templates to EPI space
    templates_2func_apply = MapNode(fsl.ApplyWarp(),
                                    iterfield=['in_file'],
                                    name='templates_2func_apply')

    # Threshold templates
    templates_threshold = MapNode(
        fsl.ImageMaths(op_string='-thr {0} -bin'.format(threshold)),
        iterfield=['in_file'],
        name='templates_threshold')

    # Output Node
    outputspec = Node(utility.IdentityInterface(
        fields=['templates_2func_files', 'func_2mni_warp']),
                      name='outputspec')

    # Connect the workflow nodes
    templates_2func_workflow.connect(inputspec, 'premat', func_2mni_warp,
                                     'premat')
    templates_2func_workflow.connect(inputspec, 'warp', func_2mni_warp,
                                     'warp1')
    templates_2func_workflow.connect(inputspec, 'func_file', mni_2func_warp,
                                     'reference')
    templates_2func_workflow.connect(func_2mni_warp, 'out_file',
                                     mni_2func_warp, 'warp')
    templates_2func_workflow.connect(inputspec, 'templates',
                                     templates_2func_apply, 'in_file')
    templates_2func_workflow.connect(inputspec, 'func_file',
                                     templates_2func_apply, 'ref_file')
    templates_2func_workflow.connect(mni_2func_warp, 'inverse_warp',
                                     templates_2func_apply, 'field_file')
    templates_2func_workflow.connect(templates_2func_apply, 'out_file',
                                     templates_threshold, 'in_file')
    templates_2func_workflow.connect(func_2mni_warp, 'out_file', outputspec,
                                     'func_2mni_warp')
    templates_2func_workflow.connect(templates_threshold, 'out_file',
                                     outputspec, 'templates_2func_files')

    return templates_2func_workflow
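A hedged usage sketch for the workflow above (file names are placeholders): inputs are set on the inputspec identity node, and results appear on outputspec.
wf = create_templates_2func_workflow(threshold=0.5)
wf.base_dir = '/tmp/work'
wf.inputs.inputspec.func_file = 'func.nii.gz'
wf.inputs.inputspec.premat = 'func2anat.mat'
wf.inputs.inputspec.warp = 'anat2mni_warp.nii.gz'
wf.inputs.inputspec.templates = ['template_a.nii.gz', 'template_b.nii.gz']
# wf.run()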
Example #4
def test_mapnode_nested(tmpdir):
    os.chdir(str(tmpdir))
    from nipype import MapNode, Function

    def func1(in1):
        return in1 + 1

    n1 = MapNode(Function(input_names=['in1'],
                          output_names=['out'],
                          function=func1),
                 iterfield=['in1'],
                 nested=True,
                 name='n1')
    n1.inputs.in1 = [[1, [2]], 3, [4, 5]]
    n1.run()
    print(n1.get_output('out'))
    assert n1.get_output('out') == [[2, [3]], 4, [5, 6]]

    n2 = MapNode(Function(input_names=['in1'],
                          output_names=['out'],
                          function=func1),
                 iterfield=['in1'],
                 nested=False,
                 name='n1')
    n2.inputs.in1 = [[1, [2]], 3, [4, 5]]
    error_raised = False
    try:
        n2.run()
    except Exception as e:
        from nipype.pipeline.engine.base import logger
        logger.info('Exception: %s' % str(e))
        error_raised = True
    assert error_raised
Example #5
def test_mapnode_nested():
    cwd = os.getcwd()
    wd = mkdtemp()
    os.chdir(wd)
    from nipype import MapNode, Function

    def func1(in1):
        return in1 + 1

    n1 = MapNode(Function(input_names=['in1'],
                          output_names=['out'],
                          function=func1),
                 iterfield=['in1'],
                 nested=True,
                 name='n1')
    n1.inputs.in1 = [[1, [2]], 3, [4, 5]]
    n1.run()
    print(n1.get_output('out'))
    yield assert_equal, n1.get_output('out'), [[2, [3]], 4, [5, 6]]

    n2 = MapNode(Function(input_names=['in1'],
                          output_names=['out'],
                          function=func1),
                 iterfield=['in1'],
                 nested=False,
                 name='n1')
    n2.inputs.in1 = [[1, [2]], 3, [4, 5]]
    error_raised = False
    try:
        n2.run()
    except Exception as e:
        pe.logger.info('Exception: %s' % str(e))
        error_raised = True
    yield assert_true, error_raised
Example #6
def test_mapnode_nested(tmpdir):
    tmpdir.chdir()
    from nipype import MapNode, Function

    def func1(in1):
        return in1 + 1
    n1 = MapNode(Function(input_names=['in1'],
                          output_names=['out'],
                          function=func1),
                 iterfield=['in1'],
                 nested=True,
                 name='n1')
    n1.inputs.in1 = [[1, [2]], 3, [4, 5]]
    n1.run()
    print(n1.get_output('out'))
    assert n1.get_output('out') == [[2, [3]], 4, [5, 6]]

    n2 = MapNode(Function(input_names=['in1'],
                          output_names=['out'],
                          function=func1),
                 iterfield=['in1'],
                 nested=False,
                 name='n1')
    n2.inputs.in1 = [[1, [2]], 3, [4, 5]]

    with pytest.raises(Exception) as excinfo:
        n2.run()
    assert "can only concatenate list" in str(excinfo.value)
Example #7
def test_serial_input(tmpdir):
    tmpdir.chdir()
    wd = os.getcwd()
    from nipype import MapNode, Function, Workflow

    def func1(in1):
        return in1
    n1 = MapNode(Function(input_names=['in1'],
                          output_names=['out'],
                          function=func1),
                 iterfield=['in1'],
                 name='n1')
    n1.inputs.in1 = [1, 2, 3]

    w1 = Workflow(name='test')
    w1.base_dir = wd
    w1.add_nodes([n1])
    # set local check
    w1.config['execution'] = {'stop_on_first_crash': 'true',
                              'local_hash_check': 'true',
                              'crashdump_dir': wd,
                              'poll_sleep_duration': 2}

    # test output of num_subnodes method when serial is default (False)
    assert n1.num_subnodes() == len(n1.inputs.in1)

    # test running the workflow on default conditions
    w1.run(plugin='MultiProc')

    # test output of num_subnodes method when serial is True
    n1._serial = True
    assert n1.num_subnodes() == 1

    # test running the workflow on serial conditions
    w1.run(plugin='MultiProc')
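The test above flips the private `_serial` attribute to force a single subnode. Recent nipype releases also expose this as a constructor keyword; a sketch continuing the example, assuming a version where `serial` is available:
n1_serial = MapNode(Function(input_names=['in1'], output_names=['out'],
                             function=func1),
                    iterfield=['in1'],
                    serial=True,       # run subnodes sequentially
                    name='n1_serial')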
Example #8
def workflow_ieeg(PARAMETERS):

    input = Node(IdentityInterface(fields=['ieeg', 'electrodes']), name='input')

    node_read = Node(function_ieeg_read, name='read')
    node_read.inputs.conditions = PARAMETERS['read']['conditions']
    node_read.inputs.minimalduration = PARAMETERS['read']['minimalduration']

    node_preprocess = MapNode(function_ieeg_preprocess, name='preprocess', iterfield=['ieeg', ])
    node_preprocess.inputs.duration = PARAMETERS['preprocess']['duration']
    node_preprocess.inputs.reref = PARAMETERS['preprocess']['reref']
    node_preprocess.inputs.offset = PARAMETERS['preprocess']['offset']

    node_frequency = MapNode(function_ieeg_powerspectrum, name='powerspectrum', iterfield=['ieeg', ])
    node_frequency.inputs.method = PARAMETERS['powerspectrum']['method']
    node_frequency.inputs.taper = PARAMETERS['powerspectrum']['taper']
    node_frequency.inputs.duration = PARAMETERS['powerspectrum']['duration']

    node_compare = Node(function_ieeg_compare, name='ecog_compare')
    node_compare.inputs.frequency = PARAMETERS['ecog_compare']['frequency']
    node_compare.inputs.baseline = PARAMETERS['ecog_compare']['baseline']
    node_compare.inputs.method = PARAMETERS['ecog_compare']['method']
    node_compare.inputs.measure = PARAMETERS['ecog_compare']['measure']

    w = Workflow('ieeg')

    w.connect(input, 'ieeg', node_read, 'ieeg')
    w.connect(input, 'electrodes', node_read, 'electrodes')
    w.connect(node_read, 'ieeg', node_preprocess, 'ieeg')
    w.connect(node_preprocess, 'ieeg', node_frequency, 'ieeg')
    w.connect(node_frequency, 'ieeg', node_compare, 'in_files')

    return w
Example #9
    def run(self, n_pipeline_jobs=1):
        """Perform transformations.

        Args:
            n_pipeline_jobs (int, optional): number of parallel processing jobs. Defaults to 1.
        """        
        if not os.path.exists(self.strOutputDir):
            os.makedirs(self.strOutputDir)
        strJobListPath = os.path.join(self.strOutputDir, 'joblist.csv')
        self.dfConfig.to_csv(strJobListPath)

        datanode = Node(utility.csv.CSVReader(in_file=os.path.abspath(strJobListPath), header=True),
                        name='datanode')

        augment = Workflow('augmentation_affinereg', base_dir=os.path.join(self.strOutputDir, 'working_dir'))

        transformFunc = MapNode(fsl.ApplyXFM(interp='spline', apply_xfm=True), name='transform_func',
                                iterfield=['in_file', 'reference', 'in_matrix_file', 'out_file'])
        augment.connect(datanode, 'func', transformFunc, 'in_file')
        augment.connect(datanode, 'func', transformFunc, 'reference')
        augment.connect(datanode, 'affine', transformFunc, 'in_matrix_file')
        augment.connect(datanode, 'output_func', transformFunc, 'out_file')

        transformAnat = MapNode(fsl.ApplyXFM(interp='spline', apply_xfm=True), name='transform_anat',
                                iterfield=['in_file', 'reference', 'in_matrix_file', 'out_file'])
        augment.connect(datanode, 'anat', transformAnat, 'in_file')
        augment.connect(datanode, 'anat', transformAnat, 'reference')
        augment.connect(datanode, 'affine', transformAnat, 'in_matrix_file')
        augment.connect(datanode, 'output_anat', transformAnat, 'out_file')

        if n_pipeline_jobs == 1:
            augment.run()
        else:
            augment.run(plugin='MultiProc', plugin_args={'n_procs': n_pipeline_jobs})
Example #10
def create_bbregister_workflow(name="bbregister",
                               contrast_type="t2",
                               partial_brain=False,
                               init_with="fsl"):
    """Find a linear transformation to align the EPI file with the anatomy."""
    in_fields = ["subject_id", "timeseries"]
    if partial_brain:
        in_fields.append("whole_brain_template")
    inputnode = Node(IdentityInterface(in_fields), "inputs")

    # Take the mean over time to get a target volume
    meanvol = MapNode(fsl.MeanImage(), "in_file", "meanvol")

    # Do a rough skullstrip using BET
    skullstrip = MapNode(fsl.BET(), "in_file", "bet")

    # Estimate the registration to Freesurfer conformed space
    func2anat = MapNode(
        fs.BBRegister(contrast_type=contrast_type,
                      init=init_with,
                      epi_mask=True,
                      registered_file=True,
                      out_reg_file="func2anat_tkreg.dat",
                      out_fsl_file="func2anat_flirt.mat"), "source_file",
        "func2anat")

    # Make an image for quality control on the registration
    report = MapNode(CoregReport(), "in_file", "coreg_report")

    # Define the workflow outputs
    outputnode = Node(IdentityInterface(["tkreg_mat", "flirt_mat", "report"]),
                      "outputs")

    bbregister = Workflow(name=name)

    # Connect the registration
    bbregister.connect([
        (inputnode, func2anat, [("subject_id", "subject_id")]),
        (inputnode, report, [("subject_id", "subject_id")]),
        (inputnode, meanvol, [("timeseries", "in_file")]),
        (meanvol, skullstrip, [("out_file", "in_file")]),
        (skullstrip, func2anat, [("out_file", "source_file")]),
        (func2anat, report, [("registered_file", "in_file")]),
        (func2anat, outputnode, [("out_reg_file", "tkreg_mat")]),
        (func2anat, outputnode, [("out_fsl_file", "flirt_mat")]),
        (report, outputnode, [("out_file", "report")]),
    ])

    # Possibly connect the full_fov image
    if partial_brain:
        bbregister.connect([
            (inputnode, func2anat, [("whole_brain_template",
                                     "intermediate_file")]),
        ])

    return bbregister
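This example uses the list form of `connect`; other examples on this page use the per-edge form. The two are interchangeable in the standard nipype API, as this small self-contained sketch shows (the identity nodes are illustrative):
from nipype import Node, Workflow
from nipype.interfaces.utility import IdentityInterface

a = Node(IdentityInterface(fields=['x']), name='a')
b = Node(IdentityInterface(fields=['x']), name='b')

wf = Workflow('connect_demo')
wf.connect(a, 'x', b, 'x')              # per-edge form
# equivalent list form (would raise here because the edge already exists):
# wf.connect([(a, b, [('x', 'x')])])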
Example #11
def test_serial_input():
    cwd = os.getcwd()
    wd = mkdtemp()
    os.chdir(wd)
    from nipype import MapNode, Function, Workflow

    def func1(in1):
        return in1

    n1 = MapNode(Function(input_names=['in1'],
                          output_names=['out'],
                          function=func1),
                 iterfield=['in1'],
                 name='n1')
    n1.inputs.in1 = [1, 2, 3]

    w1 = Workflow(name='test')
    w1.base_dir = wd
    w1.add_nodes([n1])
    # set local check
    w1.config['execution'] = {
        'stop_on_first_crash': 'true',
        'local_hash_check': 'true',
        'crashdump_dir': wd,
        'poll_sleep_duration': 2
    }

    # test output of num_subnodes method when serial is default (False)
    yield assert_equal, n1.num_subnodes(), len(n1.inputs.in1)

    # test running the workflow on default conditions
    error_raised = False
    try:
        w1.run(plugin='MultiProc')
    except Exception as e:
        from nipype.pipeline.engine.base import logger
        logger.info('Exception: %s' % str(e))
        error_raised = True
    yield assert_false, error_raised

    # test output of num_subnodes method when serial is True
    n1._serial = True
    yield assert_equal, n1.num_subnodes(), 1

    # test running the workflow on serial conditions
    error_raised = False
    try:
        w1.run(plugin='MultiProc')
    except Exception as e:
        from nipype.pipeline.engine.base import logger
        logger.info('Exception: %s' % str(e))
        error_raised = True
    yield assert_false, error_raised

    os.chdir(cwd)
    rmtree(wd)
Example #12
def test_mapnode_nested():
    cwd = os.getcwd()
    wd = mkdtemp()
    os.chdir(wd)
    from nipype import MapNode, Function
    def func1(in1):
        return in1 + 1
    n1 = MapNode(Function(input_names=['in1'],
                          output_names=['out'],
                          function=func1),
                 iterfield=['in1'],
                 nested=True,
                 name='n1')
    n1.inputs.in1 = [[1,[2]],3,[4,5]]
    n1.run()
    print(n1.get_output('out'))
    yield assert_equal, n1.get_output('out'), [[2,[3]],4,[5,6]]

    n2 = MapNode(Function(input_names=['in1'],
                          output_names=['out'],
                          function=func1),
                 iterfield=['in1'],
                 nested=False,
                 name='n1')
    n2.inputs.in1 = [[1,[2]],3,[4,5]]
    error_raised = False
    try:
        n2.run()
    except Exception as e:
        pe.logger.info('Exception: %s' % str(e))
        error_raised = True
    yield assert_true, error_raised
Example #13
# Presumably driven by pytest parametrization (the decorator is missing from
# this excerpt); the value pairs below are illustrative assumptions.
import pytest

@pytest.mark.parametrize("x_inp, f_exp", [
    (3, [6]),
    ([2, 3], [4, 6]),
])
def test_mapnode_iterfield_type(x_inp, f_exp):
    from nipype import MapNode, Function
    def double_func(x):
        return 2 * x
    double = Function(["x"], ["f_x"], double_func)

    double_node = MapNode(double, name="double", iterfield=["x"])
    double_node.inputs.x = x_inp

    res = double_node.run()
    assert res.outputs.f_x == f_exp
Example #14
def run_bet(T1_image, workdir):
    """Run freesurfer, convert to nidm and extract stats
    """
    from nipype import fsl
    from nipype import MapNode

    strip = MapNode(fsl.BET(), iterfield=['in_file'], name='skullstripper')
    strip.inputs.in_file = T1_image
    strip.inputs.mesh = True
    strip.inputs.mask = True
    strip.base_dir = workdir

    bet_results = strip.run()
    provgraph = bet_results.provenance[0]
    for bundle in bet_results.provenance[1:]:
        provgraph.add_bundle(bundle)

    vol = MapNode(fsl.ImageStats(op_string='-V'),
                  iterfield=['in_file'],
                  name='volumeextractor')
    vol.inputs.in_file = bet_results.outputs.out_file
    vol.base_dir = workdir
    vol_results = vol.run()
    for bundle in vol_results.provenance:
        provgraph.add_bundle(bundle)

    return provgraph, provgraph.rdf()
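A hedged usage sketch (paths are placeholders): `run_bet` returns the merged PROV graph plus what is presumed to be an rdflib graph, which rdflib can serialize, e.g. to Turtle:
provgraph, rdf_graph = run_bet('/data/T1.nii.gz', '/tmp/work')
print(rdf_graph.serialize(format='turtle'))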
Example #15
def create_slicetime_workflow(name="slicetime",
                              TR=2,
                              slice_order="up",
                              interleaved=False):

    inputnode = Node(IdentityInterface(["timeseries"]), "inputs")

    if isinstance(interleaved, str) and interleaved.lower() == "siemens":

        sliceorder = MapNode(SiemensSliceOrder(), "in_file", "sliceorder")
        slicetimer_set_interleaved = False
        slicetimer_iterfields = ["in_file", "custom_order"]

    elif isinstance(interleaved, bool):

        sliceorder = None
        slicetimer_set_interleaved = interleaved
        slicetimer_iterfields = ["in_file"]

    else:

        raise ValueError("interleaved must be True, False, or 'siemens'")

    slicetimer = MapNode(fsl.SliceTimer(time_repetition=TR),
                         slicetimer_iterfields, "slicetime")

    if slicetimer_set_interleaved:
        slicetimer.inputs.interleaved = True

    if slice_order == "down":
        slicetimer.inputs.index_dir = True
    elif slice_order != "up":
        raise ValueError("slice_order must be 'up' or 'down'")

    outputnode = Node(IdentityInterface(["timeseries"]), "outputs")

    slicetime = Workflow(name)
    slicetime.connect([
        (inputnode, slicetimer, [("timeseries", "in_file")]),
        (slicetimer, outputnode, [("slice_time_corrected_file", "timeseries")]),
    ])

    if sliceorder is not None:
        slicetime.connect([
            (inputnode, sliceorder, [("timeseries", "in_file")]),
            (sliceorder, slicetimer, [("out_file", "custom_order")]),
        ])

    return slicetime
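A hedged usage sketch (file names are placeholders; note that the input identity node is itself named "inputs"):
st = create_slicetime_workflow(TR=2.0, slice_order='up', interleaved=True)
st.inputs.inputs.timeseries = ['run1.nii.gz', 'run2.nii.gz']
# st.run()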
Example #16
def test_serial_input(tmpdir):
    wd = str(tmpdir)
    os.chdir(wd)
    from nipype import MapNode, Function, Workflow

    def func1(in1):
        return in1
    n1 = MapNode(Function(input_names=['in1'],
                          output_names=['out'],
                          function=func1),
                 iterfield=['in1'],
                 name='n1')
    n1.inputs.in1 = [1, 2, 3]

    w1 = Workflow(name='test')
    w1.base_dir = wd
    w1.add_nodes([n1])
    # set local check
    w1.config['execution'] = {'stop_on_first_crash': 'true',
                              'local_hash_check': 'true',
                              'crashdump_dir': wd,
                              'poll_sleep_duration': 2}

    # test output of num_subnodes method when serial is default (False)
    assert n1.num_subnodes() == len(n1.inputs.in1)

    # test running the workflow on default conditions
    error_raised = False
    try:
        w1.run(plugin='MultiProc')
    except Exception as e:
        from nipype.pipeline.engine.base import logger
        logger.info('Exception: %s' % str(e))
        error_raised = True
    assert not error_raised

    # test output of num_subnodes method when serial is True
    n1._serial = True
    assert n1.num_subnodes() == 1

    # test running the workflow on serial conditions
    error_raised = False
    try:
        w1.run(plugin='MultiProc')
    except Exception as e:
        from nipype.pipeline.engine.base import logger
        logger.info('Exception: %s' % str(e))
        error_raised = True

    assert not error_raised
Example #17
def test_mapnode_nested(tmpdir):
    os.chdir(str(tmpdir))
    from nipype import MapNode, Function

    def func1(in1):
        return in1 + 1
    n1 = MapNode(Function(input_names=['in1'],
                          output_names=['out'],
                          function=func1),
                 iterfield=['in1'],
                 nested=True,
                 name='n1')
    n1.inputs.in1 = [[1, [2]], 3, [4, 5]]
    n1.run()
    print(n1.get_output('out'))
    assert n1.get_output('out') == [[2, [3]], 4, [5, 6]]

    n2 = MapNode(Function(input_names=['in1'],
                          output_names=['out'],
                          function=func1),
                 iterfield=['in1'],
                 nested=False,
                 name='n1')
    n2.inputs.in1 = [[1, [2]], 3, [4, 5]]
    error_raised = False
    try:
        n2.run()
    except Exception as e:
        from nipype.pipeline.engine.base import logger
        logger.info('Exception: %s' % str(e))
        error_raised = True
    assert error_raised
Example #18
def create_workflow_temporalpatterns_7T(subjects, runs):


    input_node = Node(IdentityInterface(fields=[
        'bold',
        'events',
        't2star_fov',
        't2star_whole',
        't1w',
        ]), name='input')

    coreg_tstat = MapNode(
        interface=FLIRT(), name='realign_result_to_anat',
        iterfield=['in_file', ])
    coreg_tstat.inputs.apply_xfm = True

    w = Workflow('temporalpatterns_7T')

    w_preproc = create_workflow_preproc_spm()
    w_spatialobject = create_workflow_temporalpatterns_fsl()
    w_coreg = create_workflow_coreg_epi2t1w()

    w.connect(input_node, 'bold', w_preproc, 'input.bold')
    w.connect(input_node, 'events', w_spatialobject, 'input.events')
    w.connect(input_node, 't2star_fov', w_coreg, 'input.t2star_fov')
    w.connect(input_node, 't2star_whole', w_coreg, 'input.t2star_whole')
    w.connect(input_node, 't1w', w_coreg, 'input.t1w')
    w.connect(input_node, 't1w', coreg_tstat, 'reference')
    w.connect(w_preproc, 'realign.realigned_files', w_spatialobject, 'input.bold')
    w.connect(w_preproc, 'realign.mean_image', w_coreg, 'input.bold_mean')

    w.connect(w_spatialobject, 'output.T_image', coreg_tstat, 'in_file')
    w.connect(w_coreg, 'output.mat_epi2t1w', coreg_tstat, 'in_matrix_file')

    return w
Example #19
def test_mapnode_expansion(tmpdir):
    tmpdir.chdir()
    from nipype import MapNode, Function

    def func1(in1):
        return in1 + 1

    mapnode = MapNode(
        Function(function=func1), iterfield="in1", name="mapnode", n_procs=2, mem_gb=2
    )
    mapnode.inputs.in1 = [1, 2]

    for idx, node in mapnode._make_nodes():
        for attr in ("overwrite", "run_without_submitting", "plugin_args"):
            assert getattr(node, attr) == getattr(mapnode, attr)
        for attr in ("_n_procs", "_mem_gb"):
            assert getattr(node, attr) == getattr(mapnode, attr)
Example #20
def create_filtering_workflow(name="filter",
                              hpf_cutoff=128,
                              TR=2,
                              output_name="timeseries"):
    """Scale and high-pass filter the timeseries."""
    inputnode = Node(IdentityInterface(["timeseries", "mask_file"]), "inputs")

    # Grand-median scale within the brain mask
    scale = MapNode(ScaleTimeseries(statistic="median", target=10000),
                    ["in_file", "mask_file"], "scale")

    # Gaussian running-line filter
    if hpf_cutoff is None:
        hpf_sigma = -1
    else:
        hpf_sigma = (hpf_cutoff / 2.0) / TR
    filter = MapNode(fsl.TemporalFilter(highpass_sigma=hpf_sigma), "in_file",
                     "filter")

    # Possibly replace the mean
    # (In later versions of FSL, the highpass filter removes the
    # mean component. Put it back, but be flexible so this isn't
    # broken on older versions of FSL).
    replacemean = MapNode(ReplaceMean(output_name=output_name),
                          ["orig_file", "filtered_file"], "replacemean")

    # Compute a final mean functional volume
    meanfunc = MapNode(fsl.MeanImage(out_file="mean_func.nii.gz"), "in_file",
                       "meanfunc")

    outputnode = Node(IdentityInterface(["timeseries", "mean_file"]),
                      "outputs")

    filtering = Workflow(name)
    filtering.connect([
        (inputnode, scale, [("timeseries", "in_file"),
                            ("mask_file", "mask_file")]),
        (scale, filter, [("out_file", "in_file")]),
        (scale, replacemean, [("out_file", "orig_file")]),
        (filter, replacemean, [("out_file", "filtered_file")]),
        (replacemean, meanfunc, [("out_file", "in_file")]),
        (replacemean, outputnode, [("out_file", "timeseries")]),
        (meanfunc, outputnode, [("out_file", "mean_file")]),
    ])

    return filtering
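ReplaceMean is a project-local interface; the comment above explains its purpose. A minimal sketch of the same effect using plain FSL interfaces, with placeholder file names:
from nipype.interfaces import fsl

# temporal mean of the unfiltered run
tmean = fsl.MeanImage(in_file='orig.nii.gz', out_file='orig_mean.nii.gz')
# add that mean back onto the filtered run
addmean = fsl.BinaryMaths(in_file='filtered.nii.gz', operation='add',
                          operand_file='orig_mean.nii.gz',
                          out_file='timeseries.nii.gz')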
Example #21
def create_segments_2func_workflow(threshold=0.5,
                                   name='segments_2func_workflow'):
    segments_2func_workflow = Workflow(name=name)

    # Input Node
    inputspec = Node(
        utility.IdentityInterface(fields=['segments', 'premat', 'func_file']),
        name='inputspec')

    # Calculate inverse matrix of EPI to T1
    anat_2func_matrix = Node(fsl.ConvertXFM(invert_xfm=True),
                             name='anat_2func_matrix')

    # Transform segments to EPI space
    segments_2func_apply = MapNode(fsl.ApplyXFM(),
                                   iterfield=['in_file'],
                                   name='segments_2func_apply')

    # Threshold segments
    segments_threshold = MapNode(
        fsl.ImageMaths(op_string='-thr {0} -bin'.format(threshold)),
        iterfield=['in_file'],
        name='segments_threshold')

    # Output Node
    outputspec = Node(utility.IdentityInterface(
        fields=['segments_2func_files', 'anat_2func_matrix_file']),
                      name='outputspec')

    segments_2func_workflow.connect(inputspec, 'premat', anat_2func_matrix,
                                    'in_file')
    segments_2func_workflow.connect(inputspec, 'segments',
                                    segments_2func_apply, 'in_file')
    segments_2func_workflow.connect(inputspec, 'func_file',
                                    segments_2func_apply, 'reference')
    segments_2func_workflow.connect(anat_2func_matrix, 'out_file',
                                    segments_2func_apply, 'in_matrix_file')
    segments_2func_workflow.connect(segments_2func_apply, 'out_file',
                                    segments_threshold, 'in_file')
    segments_2func_workflow.connect(anat_2func_matrix, 'out_file', outputspec,
                                    'anat_2func_matrix_file')
    segments_2func_workflow.connect(segments_threshold, 'out_file', outputspec,
                                    'segments_2func_files')

    return segments_2func_workflow
Example #22
def create_nonbrain_meansignal(name='nonbrain_meansignal'):

    nonbrain_meansignal = Workflow(name=name)

    inputspec = Node(utility.IdentityInterface(fields=['func_file']),
                     name='inputspec')

    # Split raw 4D functional image into 3D niftis
    split_image = Node(fsl.Split(dimension='t', output_type='NIFTI'),
                       name='split_image')

    # Create a brain mask for each of the 3D images
    brain_mask = MapNode(fsl.BET(frac=0.3,
                                 mask=True,
                                 no_output=True,
                                 robust=True),
                         iterfield=['in_file'],
                         name='brain_mask')

    # Merge the 3D masks into a 4D nifti (producing a separate mask per volume)
    merge_mask = Node(fsl.Merge(dimension='t'), name='merge_mask')

    # Reverse the 4D brain mask, to produce a 4D non brain mask
    reverse_mask = Node(fsl.ImageMaths(op_string='-sub 1 -mul -1'),
                        name='reverse_mask')

    # Apply the mask on the raw functional data
    apply_mask = Node(fsl.ImageMaths(), name='apply_mask')

    # Highpass filter the non brain image
    highpass = create_highpass_filter(name='highpass')

    # Extract the mean signal from the non brain image
    mean_signal = Node(fsl.ImageMeants(), name='mean_signal')

    outputspec = Node(utility.IdentityInterface(fields=['nonbrain_regressor']),
                      name='outputspec')

    nonbrain_meansignal.connect(inputspec, 'func_file', split_image, 'in_file')
    nonbrain_meansignal.connect(split_image, 'out_files', brain_mask,
                                'in_file')
    nonbrain_meansignal.connect(brain_mask, 'mask_file', merge_mask,
                                'in_files')
    nonbrain_meansignal.connect(merge_mask, 'merged_file', reverse_mask,
                                'in_file')
    nonbrain_meansignal.connect(reverse_mask, 'out_file', apply_mask,
                                'mask_file')
    nonbrain_meansignal.connect(inputspec, 'func_file', apply_mask, 'in_file')
    nonbrain_meansignal.connect(apply_mask, 'out_file', highpass,
                                'inputspec.in_file')
    nonbrain_meansignal.connect(highpass, 'outputspec.filtered_file',
                                mean_signal, 'in_file')
    nonbrain_meansignal.connect(mean_signal, 'out_file', outputspec,
                                'nonbrain_regressor')

    return nonbrain_meansignal
Example #23
def test_mapnode_expansion(tmpdir):
    tmpdir.chdir()
    from nipype import MapNode, Function

    def func1(in1):
        return in1 + 1

    mapnode = MapNode(Function(function=func1),
                      iterfield='in1',
                      name='mapnode',
                      n_procs=2,
                      mem_gb=2)
    mapnode.inputs.in1 = [1, 2]

    for idx, node in mapnode._make_nodes():
        for attr in ('overwrite', 'run_without_submitting', 'plugin_args'):
            assert getattr(node, attr) == getattr(mapnode, attr)
        for attr in ('_n_procs', '_mem_gb'):
            assert (getattr(node, attr) == getattr(mapnode, attr))
Example #24
def create_realignment_workflow(name="realignment"):
    """Motion and slice-time correct the timeseries and summarize."""
    inputnode = Node(IdentityInterface(["timeseries"]), "inputs")

    # Get the middle volume of each run for motion correction
    extractref = MapNode(ExtractRealignmentTarget(), "in_file", "extractref")

    # Motion correct to middle volume of each run
    mcflirt = MapNode(
        fsl.MCFLIRT(cost="normcorr",
                    interpolation="spline",
                    save_mats=True,
                    save_rms=True,
                    save_plots=True), ["in_file", "ref_file"], "mcflirt")

    # Generate a report on the motion correction
    mcreport = MapNode(RealignmentReport(),
                       ["target_file", "realign_params", "displace_params"],
                       "mcreport")

    # Define the outputs
    outputnode = Node(
        IdentityInterface(
            ["timeseries", "example_func", "report", "motion_file"]),
        "outputs")

    # Define and connect the sub workflow
    realignment = Workflow(name)

    realignment.connect([
        (inputnode, extractref, [("timeseries", "in_file")]),
        (inputnode, mcflirt, [("timeseries", "in_file")]),
        (extractref, mcflirt, [("out_file", "ref_file")]),
        (extractref, mcreport, [("out_file", "target_file")]),
        (mcflirt, mcreport, [("par_file", "realign_params"),
                             ("rms_files", "displace_params")]),
        (mcflirt, outputnode, [("out_file", "timeseries")]),
        (extractref, outputnode, [("out_file", "example_func")]),
        (mcreport, outputnode, [("realign_report", "report"),
                                ("motion_file", "motion_file")]),
    ])

    return realignment
Example #25
def create_unwarp_workflow(name="unwarp", fieldmap_pe=("y", "y-")):
    """Unwarp functional timeseries using reverse phase-blipped images."""
    inputnode = Node(IdentityInterface(["timeseries", "fieldmap"]), "inputs")

    # Calculate the shift field
    # Note that setting readout_times to 1 will give a fine
    # map of the field, but the units will be off
    # Since we don't write out the map of the field itself, it does
    # not seem worth it to add another parameter for the readout times.
    # (It does require that they are the same, but when wouldn't they be?)
    topup = MapNode(
        fsl.TOPUP(encoding_direction=fieldmap_pe,
                  readout_times=[1] * len(fieldmap_pe)), ["in_file"], "topup")

    # Unwarp the timeseries
    applytopup = MapNode(
        fsl.ApplyTOPUP(method="jac", in_index=[1]),
        ["in_files", "in_topup_fieldcoef", "in_topup_movpar", "encoding_file"],
        "applytopup")

    # Make a figure summarize the unwarping
    report = MapNode(UnwarpReport(), ["orig_file", "corrected_file"],
                     "unwarp_report")

    # Define the outputs
    outputnode = Node(IdentityInterface(["timeseries", "report"]), "outputs")

    # Define and connect the workflow
    unwarp = Workflow(name)
    unwarp.connect([
        (inputnode, topup, [("fieldmap", "in_file")]),
        (inputnode, applytopup, [("timeseries", "in_files")]),
        (topup, applytopup, [("out_fieldcoef", "in_topup_fieldcoef"),
                             ("out_movpar", "in_topup_movpar"),
                             ("out_enc_file", "encoding_file")]),
        (inputnode, report, [("fieldmap", "orig_file")]),
        (topup, report, [("out_corrected", "corrected_file")]),
        (applytopup, outputnode, [("out_corrected", "timeseries")]),
        (report, outputnode, [("out_file", "report")]),
    ])

    return unwarp
Example #26
def test_mapnode_expansion(tmpdir):
    os.chdir(str(tmpdir))
    from nipype import MapNode, Function

    def func1(in1):
        return in1 + 1

    mapnode = MapNode(Function(function=func1),
                      iterfield='in1',
                      name='mapnode')
    mapnode.inputs.in1 = [1, 2]
    mapnode.interface.num_threads = 2
    mapnode.interface.estimated_memory_gb = 2

    for idx, node in mapnode._make_nodes():
        for attr in ('overwrite', 'run_without_submitting', 'plugin_args'):
            assert getattr(node, attr) == getattr(mapnode, attr)
        for attr in ('num_threads', 'estimated_memory_gb'):
            assert (getattr(node._interface,
                            attr) == getattr(mapnode._interface, attr))
Example #27
def test_mapnode_expansion(tmpdir):
    tmpdir.chdir()
    from nipype import MapNode, Function

    def func1(in1):
        return in1 + 1

    mapnode = MapNode(Function(function=func1),
                      iterfield='in1',
                      name='mapnode',
                      n_procs=2,
                      mem_gb=2)
    mapnode.inputs.in1 = [1, 2]

    for idx, node in mapnode._make_nodes():
        for attr in ('overwrite', 'run_without_submitting', 'plugin_args'):
            assert getattr(node, attr) == getattr(mapnode, attr)
        for attr in ('_n_procs', '_mem_gb'):
            assert (getattr(node, attr) ==
                    getattr(mapnode, attr))
Example #28
def run_bet(T1_image, workdir):
    """Run freesurfer, convert to nidm and extract stats
    """
    from nipype import fsl
    from nipype import MapNode

    strip = MapNode(fsl.BET(), iterfield=['in_file'], name='skullstripper')
    strip.inputs.in_file = T1_image
    strip.inputs.mesh = True
    strip.inputs.mask = True
    strip.base_dir = workdir

    bet_results = strip.run()
    provgraph = bet_results.provenance[0]
    for bundle in bet_results.provenance[1:]:
        provgraph.add_bundle(bundle)

    vol = MapNode(fsl.ImageStats(op_string='-V'), iterfield=['in_file'],
                  name='volumeextractor')
    vol.inputs.in_file = bet_results.outputs.out_file
    vol.base_dir = workdir
    vol_results = vol.run()
    for bundle in vol_results.provenance:
        provgraph.add_bundle(bundle)

    return provgraph, provgraph.rdf()
Example #29
def create_confound_extraction_workflow(name="confounds", wm_components=6):
    """Extract nuisance variables from anatomical sources."""
    inputnode = Node(
        IdentityInterface(
            ["timeseries", "brain_mask", "reg_file", "subject_id"]), "inputs")

    # Find the subject's Freesurfer segmentation
    # Grab the Freesurfer aparc+aseg file as an anatomical brain mask
    getaseg = Node(
        io.SelectFiles({"aseg": "{subject_id}/mri/aseg.mgz"},
                       base_directory=os.environ["SUBJECTS_DIR"]), "getaseg")

    # Select and erode the white matter to get deep voxels
    selectwm = Node(fs.Binarize(erode=3, wm=True), "selectwm")

    # Transform the mask into functional space
    transform = MapNode(fs.ApplyVolTransform(inverse=True, interp="nearest"),
                        ["reg_file", "source_file"], "transform")

    # Extract eigenvariates of the timeseries from WM and whole brain
    extract = MapNode(ExtractConfounds(n_components=wm_components),
                      ["timeseries", "brain_mask", "wm_mask"], "extract")

    outputnode = Node(IdentityInterface(["confound_file"]), "outputs")

    confounds = Workflow(name)
    confounds.connect([
        (inputnode, getaseg, [("subject_id", "subject_id")]),
        (getaseg, selectwm, [("aseg", "in_file")]),
        (selectwm, transform, [("binary_file", "target_file")]),
        (inputnode, transform, [("reg_file", "reg_file"),
                                ("timeseries", "source_file")]),
        (transform, extract, [("transformed_file", "wm_mask")]),
        (inputnode, extract, [("timeseries", "timeseries"),
                              ("brain_mask", "brain_mask")]),
        (extract, outputnode, [("out_file", "confound_file")]),
    ])

    return confounds
Example #30
def test_serial_input(tmpdir):
    tmpdir.chdir()
    wd = os.getcwd()
    from nipype import MapNode, Function, Workflow

    def func1(in1):
        return in1

    n1 = MapNode(
        Function(input_names=["in1"], output_names=["out"], function=func1),
        iterfield=["in1"],
        name="n1",
    )
    n1.inputs.in1 = [1, 2, 3]

    w1 = Workflow(name="test")
    w1.base_dir = wd
    w1.add_nodes([n1])
    # set local check
    w1.config["execution"] = {
        "stop_on_first_crash": "true",
        "local_hash_check": "true",
        "crashdump_dir": wd,
        "poll_sleep_duration": 2,
    }

    # test output of num_subnodes method when serial is default (False)
    assert n1.num_subnodes() == len(n1.inputs.in1)

    # test running the workflow on default conditions
    w1.run(plugin="MultiProc")

    # test output of num_subnodes method when serial is True
    n1._serial = True
    assert n1.num_subnodes() == 1

    # test running the workflow on serial conditions
    w1.run(plugin="MultiProc")
Example #31
def make_func_mask_workflow(name='funcmask', base_dir=None):
    brainmask = Workflow(name=name, base_dir=base_dir)

    inputnode = Node(utility.IdentityInterface(fields=['mean_file']), name='inputnode')
    outputnode = Node(utility.IdentityInterface(fields=['masked_file', 'mask']),
                      name='outputnode')
    skullstrip1 = MapNode(fsl.BET(frac=0.2, mask=True, output_type='NIFTI_GZ'), name='skullstrip_first_pass',
                          iterfield=['in_file'])
    brainmask.connect(inputnode, 'mean_file', skullstrip1, 'in_file')
    skullstrip2 = MapNode(afni.Automask(dilate=1, outputtype='NIFTI_GZ'), name='skullstrip_second_pass',
                          iterfield=['in_file'])
    brainmask.connect(skullstrip1, 'out_file', skullstrip2, 'in_file')
    combine_masks = MapNode(fsl.BinaryMaths(operation='mul'), name='combine_masks',
                            iterfield=['in_file', 'operand_file'])
    brainmask.connect(skullstrip1, 'mask_file', combine_masks, 'in_file')
    brainmask.connect(skullstrip2, 'out_file', combine_masks, 'operand_file')
    apply_mask = MapNode(fsl.ApplyMask(), name='apply_mask', iterfield=['in_file', 'mask_file'])
    brainmask.connect(inputnode, 'mean_file', apply_mask, 'in_file')
    brainmask.connect(combine_masks, 'out_file', apply_mask, 'mask_file')

    brainmask.connect(apply_mask, 'out_file', outputnode, 'masked_file')
    brainmask.connect(combine_masks, 'out_file', outputnode, 'mask')

    return brainmask
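A hedged usage sketch (the path is a placeholder); since the masking nodes are MapNodes iterating over 'in_file', mean_file expects a list:
wf = make_func_mask_workflow(base_dir='/tmp/work')
wf.inputs.inputnode.mean_file = ['mean_func.nii.gz']
# wf.run()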
Example #32
def make_simple_workflow():

    wf = Workflow(name="test")

    node1 = Node(IdentityInterface(fields=["foo"]), name="node1")
    node2 = MapNode(IdentityInterface(fields=["foo"]),
                    name="node2",
                    iterfield=["foo"])
    node3 = Node(IdentityInterface(fields=["foo"]), name="node3")

    wf.connect([
        (node1, node2, [("foo", "foo")]),
        (node2, node3, [("foo", "foo")]),
    ])

    return wf, node1, node2, node3
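A hedged usage sketch: the helper returns node handles alongside the workflow, so a caller can set inputs and inspect the graph directly:
wf, node1, node2, node3 = make_simple_workflow()
node1.inputs.foo = ['a', 'b']    # node2 (a MapNode) maps over this list
print(wf.list_node_names())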
Example #33
def test_mapnode_json():
    """Tests that mapnodes don't generate excess jsons
    """
    cwd = os.getcwd()
    wd = mkdtemp()
    os.chdir(wd)
    from nipype import MapNode, Function, Workflow

    def func1(in1):
        return in1 + 1

    n1 = MapNode(Function(input_names=['in1'],
                          output_names=['out'],
                          function=func1),
                 iterfield=['in1'],
                 name='n1')
    n1.inputs.in1 = [1]
    w1 = Workflow(name='test')
    w1.base_dir = wd
    w1.config['execution']['crashdump_dir'] = wd
    w1.add_nodes([n1])
    w1.run()
    n1.inputs.in1 = [2]
    w1.run()
    # should rerun
    n1.inputs.in1 = [1]
    eg = w1.run()

    node = eg.nodes()[0]
    outjson = glob(os.path.join(node.output_dir(), '_0x*.json'))
    yield assert_equal, len(outjson), 1

    # check that multiple json's don't trigger rerun
    with open(os.path.join(node.output_dir(), 'test.json'), 'wt') as fp:
        fp.write('dummy file')
    w1.config['execution'].update(**{'stop_on_first_rerun': True})
    error_raised = False
    try:
        w1.run()
    except:
        error_raised = True
    yield assert_false, error_raised
    os.chdir(cwd)
    rmtree(wd)
Example #34
def computed_avg_node(node_name,
                      nnodes,
                      work_dir,
                      chunk=None,
                      delay=0,
                      benchmark_dir=None,
                      benchmark=False,
                      cli=False,
                      avg=None):
    files = get_partitions(chunk, nnodes)

    if delay is None:
        delay = 0

    ca_name = 'ca1_{0}'.format(node_name)
    ca2_name = 'ca2_{0}'.format(node_name)

    ca_1 = MapNode(Function(input_names=[
        'chunk', 'delay', 'benchmark', 'benchmark_dir', 'cli', 'wf_name',
        'avg', 'work_dir'
    ],
                            output_names=['inc_chunk'],
                            function=increment_wf),
                   name=ca_name,
                   iterfield='chunk')
    ca_1.inputs.chunk = files
    ca_1.inputs.delay = delay
    ca_1.inputs.benchmark = benchmark
    ca_1.inputs.benchmark_dir = benchmark_dir
    ca_1.inputs.cli = cli
    ca_1.inputs.wf_name = 'incwf_{}'.format(ca_name)
    ca_1.inputs.avg = avg
    ca_1.inputs.work_dir = work_dir

    ca_2 = Node(Function(input_names=['chunks', 'benchmark', 'benchmark_dir'],
                         output_names=['avg_chunk'],
                         function=compute_avg),
                name=ca2_name)

    ca_2.inputs.benchmark = benchmark
    ca_2.inputs.benchmark_dir = benchmark_dir

    return ca_1, ca_2
Example #35
def cluster_save(nname, chunks, output_dir, it, benchmark, benchmark_dir,
                 nnodes, work_dir):
    files = get_partitions(chunks, nnodes)
    sp_name = 'sp_{}'.format(nname)

    sp = MapNode(Function(input_names=[
        'input_img', 'output_dir', 'it', 'benchmark', 'benchmark_dir',
        'work_dir'
    ],
                          output_names=['output_filename'],
                          function=save_wf),
                 name=sp_name,
                 iterfield='input_img')
    sp.inputs.input_img = files
    sp.inputs.output_dir = output_dir
    sp.inputs.it = it
    sp.inputs.benchmark = benchmark
    sp.inputs.benchmark_dir = benchmark_dir
    sp.inputs.work_dir = work_dir

    return sp
Example #36
def test_mapnode_json(tmpdir):
    """Tests that mapnodes don't generate excess jsons
    """
    tmpdir.chdir()
    wd = os.getcwd()
    from nipype import MapNode, Function, Workflow

    def func1(in1):
        return in1 + 1

    n1 = MapNode(
        Function(input_names=["in1"], output_names=["out"], function=func1),
        iterfield=["in1"],
        name="n1",
    )
    n1.inputs.in1 = [1]
    w1 = Workflow(name="test")
    w1.base_dir = wd
    w1.config["execution"]["crashdump_dir"] = wd
    w1.add_nodes([n1])
    w1.run()
    n1.inputs.in1 = [2]
    w1.run()
    # should rerun
    n1.inputs.in1 = [1]
    eg = w1.run()

    node = list(eg.nodes())[0]
    outjson = glob(os.path.join(node.output_dir(), "_0x*.json"))
    assert len(outjson) == 1

    # check that multiple json's don't trigger rerun
    with open(os.path.join(node.output_dir(), "test.json"), "wt") as fp:
        fp.write("dummy file")
    w1.config["execution"].update(**{"stop_on_first_rerun": True})

    w1.run()
Example #37
def create_workflow(files,
                    target_file,
                    subject_id,
                    TR,
                    slice_times,
                    norm_threshold=1,
                    num_components=5,
                    vol_fwhm=None,
                    surf_fwhm=None,
                    lowpass_freq=-1,
                    highpass_freq=-1,
                    subjects_dir=None,
                    sink_directory=os.getcwd(),
                    target_subject=['fsaverage3', 'fsaverage4'],
                    name='resting'):

    wf = Workflow(name=name)

    # Rename files in case they are named identically
    name_unique = MapNode(Rename(format_string='rest_%(run)02d'),
                          iterfield=['in_file', 'run'],
                          name='rename')
    name_unique.inputs.keep_ext = True
    name_unique.inputs.run = list(range(1, len(files) + 1))
    name_unique.inputs.in_file = files

    realign = Node(nipy.SpaceTimeRealigner(), name="spacetime_realign")
    realign.inputs.slice_times = slice_times
    realign.inputs.tr = TR
    realign.inputs.slice_info = 2
    realign.plugin_args = {'sbatch_args': '-c%d' % 4}

    # Compute TSNR on realigned data regressing polynomials up to order 2
    tsnr = MapNode(TSNR(regress_poly=2), iterfield=['in_file'], name='tsnr')
    wf.connect(realign, "out_file", tsnr, "in_file")

    # Compute the median image across runs
    calc_median = Node(Function(input_names=['in_files'],
                                output_names=['median_file'],
                                function=median,
                                imports=imports),
                       name='median')
    wf.connect(tsnr, 'detrended_file', calc_median, 'in_files')

    """Segment and Register
    """

    registration = create_reg_workflow(name='registration')
    wf.connect(calc_median, 'median_file', registration, 'inputspec.mean_image')
    registration.inputs.inputspec.subject_id = subject_id
    registration.inputs.inputspec.subjects_dir = subjects_dir
    registration.inputs.inputspec.target_image = target_file

    """Quantify TSNR in each freesurfer ROI
    """

    get_roi_tsnr = MapNode(fs.SegStats(default_color_table=True),
                           iterfield=['in_file'], name='get_aparc_tsnr')
    get_roi_tsnr.inputs.avgwf_txt_file = True
    wf.connect(tsnr, 'tsnr_file', get_roi_tsnr, 'in_file')
    wf.connect(registration, 'outputspec.aparc', get_roi_tsnr, 'segmentation_file')

    """Use :class:`nipype.algorithms.rapidart` to determine which of the
    images in the functional series are outliers based on deviations in
    intensity or movement.
    """

    art = Node(interface=ArtifactDetect(), name="art")
    art.inputs.use_differences = [True, True]
    art.inputs.use_norm = True
    art.inputs.norm_threshold = norm_threshold
    art.inputs.zintensity_threshold = 9
    art.inputs.mask_type = 'spm_global'
    art.inputs.parameter_source = 'NiPy'

    """Here we are connecting all the nodes together. Notice that we add the merge node only if you choose
    to use 4D. Also `get_vox_dims` function is passed along the input volume of normalise to set the optimal
    voxel sizes.
    """

    wf.connect([(name_unique, realign, [('out_file', 'in_file')]),
                (realign, art, [('out_file', 'realigned_files')]),
                (realign, art, [('par_file', 'realignment_parameters')]),
                ])

    def selectindex(files, idx):
        import numpy as np
        from nipype.utils.filemanip import filename_to_list, list_to_filename
        return list_to_filename(np.array(filename_to_list(files))[idx].tolist())

    mask = Node(fsl.BET(), name='getmask')
    mask.inputs.mask = True
    wf.connect(calc_median, 'median_file', mask, 'in_file')
    # get segmentation in normalized functional space

    def merge_files(in1, in2):
        out_files = filename_to_list(in1)
        out_files.extend(filename_to_list(in2))
        return out_files

    # filter some noise

    # Compute motion regressors
    motreg = Node(Function(input_names=['motion_params', 'order',
                                        'derivatives'],
                           output_names=['out_files'],
                           function=motion_regressors,
                           imports=imports),
                  name='getmotionregress')
    wf.connect(realign, 'par_file', motreg, 'motion_params')

    # Create a filter to remove motion and art confounds
    createfilter1 = Node(Function(input_names=['motion_params', 'comp_norm',
                                               'outliers', 'detrend_poly'],
                                  output_names=['out_files'],
                                  function=build_filter1,
                                  imports=imports),
                         name='makemotionbasedfilter')
    createfilter1.inputs.detrend_poly = 2
    wf.connect(motreg, 'out_files', createfilter1, 'motion_params')
    wf.connect(art, 'norm_files', createfilter1, 'comp_norm')
    wf.connect(art, 'outlier_files', createfilter1, 'outliers')

    filter1 = MapNode(fsl.GLM(out_f_name='F_mcart.nii.gz',
                              out_pf_name='pF_mcart.nii.gz',
                              demean=True),
                      iterfield=['in_file', 'design', 'out_res_name'],
                      name='filtermotion')

    wf.connect(realign, 'out_file', filter1, 'in_file')
    wf.connect(realign, ('out_file', rename, '_filtermotart'),
               filter1, 'out_res_name')
    wf.connect(createfilter1, 'out_files', filter1, 'design')

    createfilter2 = MapNode(ACompCor(),
                            iterfield=['realigned_file', 'extra_regressors'],
                            name='makecompcorrfilter')
    createfilter2.inputs.components_file = 'noise_components.txt'
    createfilter2.inputs.num_components = num_components

    wf.connect(createfilter1, 'out_files', createfilter2, 'extra_regressors')
    wf.connect(filter1, 'out_res', createfilter2, 'realigned_file')
    wf.connect(registration, ('outputspec.segmentation_files', selectindex, [0, 2]),
               createfilter2, 'mask_file')

    filter2 = MapNode(fsl.GLM(out_f_name='F.nii.gz',
                              out_pf_name='pF.nii.gz',
                              demean=True),
                      iterfield=['in_file', 'design', 'out_res_name'],
                      name='filter_noise_nosmooth')
    wf.connect(filter1, 'out_res', filter2, 'in_file')
    wf.connect(filter1, ('out_res', rename, '_cleaned'),
               filter2, 'out_res_name')
    wf.connect(createfilter2, 'components_file', filter2, 'design')
    wf.connect(mask, 'mask_file', filter2, 'mask')

    bandpass = Node(Function(input_names=['files', 'lowpass_freq',
                                          'highpass_freq', 'fs'],
                             output_names=['out_files'],
                             function=bandpass_filter,
                             imports=imports),
                    name='bandpass_unsmooth')
    bandpass.inputs.fs = 1. / TR
    bandpass.inputs.highpass_freq = highpass_freq
    bandpass.inputs.lowpass_freq = lowpass_freq
    wf.connect(filter2, 'out_res', bandpass, 'files')

    """Smooth the functional data using
    :class:`nipype.interfaces.fsl.IsotropicSmooth`.
    """

    smooth = MapNode(interface=fsl.IsotropicSmooth(), name="smooth", iterfield=["in_file"])
    smooth.inputs.fwhm = vol_fwhm

    wf.connect(bandpass, 'out_files', smooth, 'in_file')

    collector = Node(Merge(2), name='collect_streams')
    wf.connect(smooth, 'out_file', collector, 'in1')
    wf.connect(bandpass, 'out_files', collector, 'in2')

    """
    Transform the remaining images. First to anatomical and then to target
    """

    warpall = MapNode(ants.ApplyTransforms(), iterfield=['input_image'],
                      name='warpall')
    warpall.inputs.input_image_type = 3
    warpall.inputs.interpolation = 'Linear'
    warpall.inputs.invert_transform_flags = [False, False]
    warpall.inputs.terminal_output = 'file'
    warpall.inputs.reference_image = target_file
    warpall.inputs.args = '--float'
    warpall.inputs.num_threads = 2
    warpall.plugin_args = {'sbatch_args': '-c%d' % 2}

    # transform to target
    wf.connect(collector, 'out', warpall, 'input_image')
    wf.connect(registration, 'outputspec.transforms', warpall, 'transforms')

    mask_target = Node(fsl.ImageMaths(op_string='-bin'), name='target_mask')

    wf.connect(registration, 'outputspec.anat2target', mask_target, 'in_file')

    maskts = MapNode(fsl.ApplyMask(), iterfield=['in_file'], name='ts_masker')
    wf.connect(warpall, 'output_image', maskts, 'in_file')
    wf.connect(mask_target, 'out_file', maskts, 'mask_file')

    # map to surface
    # extract aparc+aseg ROIs
    # extract subcortical ROIs
    # extract target space ROIs
    # combine subcortical and cortical rois into a single cifti file

    #######
    # Convert aparc to subject functional space

    # Sample the average time series in aparc ROIs
    sampleaparc = MapNode(freesurfer.SegStats(default_color_table=True),
                          iterfield=['in_file', 'summary_file',
                                     'avgwf_txt_file'],
                          name='aparc_ts')
    sampleaparc.inputs.segment_id = ([8] + list(range(10, 14)) + [17, 18, 26, 47] +
                                     list(range(49, 55)) + [58] + list(range(1001, 1036)) +
                                     list(range(2001, 2036)))

    wf.connect(registration, 'outputspec.aparc',
               sampleaparc, 'segmentation_file')
    wf.connect(collector, 'out', sampleaparc, 'in_file')

    def get_names(files, suffix):
        """Generate appropriate names for output files
        """
        from nipype.utils.filemanip import (split_filename, filename_to_list,
                                            list_to_filename)
        import os
        out_names = []
        for filename in files:
            path, name, _ = split_filename(filename)
            out_names.append(os.path.join(path, name + suffix))
        return list_to_filename(out_names)

    wf.connect(collector, ('out', get_names, '_avgwf.txt'),
               sampleaparc, 'avgwf_txt_file')
    wf.connect(collector, ('out', get_names, '_summary.stats'),
               sampleaparc, 'summary_file')

    # Sample the time series onto the surface of the target subject. Performs
    # sampling into the left and right hemispheres separately
    target = Node(IdentityInterface(fields=['target_subject']), name='target')
    target.iterables = ('target_subject', filename_to_list(target_subject))

    samplerlh = MapNode(freesurfer.SampleToSurface(),
                        iterfield=['source_file'],
                        name='sampler_lh')
    samplerlh.inputs.sampling_method = "average"
    samplerlh.inputs.sampling_range = (0.1, 0.9, 0.1)
    samplerlh.inputs.sampling_units = "frac"
    samplerlh.inputs.interp_method = "trilinear"
    samplerlh.inputs.smooth_surf = surf_fwhm
    # samplerlh.inputs.cortex_mask = True
    samplerlh.inputs.out_type = 'niigz'
    samplerlh.inputs.subjects_dir = subjects_dir

    samplerrh = samplerlh.clone('sampler_rh')

    samplerlh.inputs.hemi = 'lh'
    wf.connect(collector, 'out', samplerlh, 'source_file')
    wf.connect(registration, 'outputspec.out_reg_file', samplerlh, 'reg_file')
    wf.connect(target, 'target_subject', samplerlh, 'target_subject')

    samplerrh.set_input('hemi', 'rh')
    wf.connect(collector, 'out', samplerrh, 'source_file')
    wf.connect(registration, 'outputspec.out_reg_file', samplerrh, 'reg_file')
    wf.connect(target, 'target_subject', samplerrh, 'target_subject')

    # Combine left and right hemisphere to text file
    combiner = MapNode(Function(input_names=['left', 'right'],
                                output_names=['out_file'],
                                function=combine_hemi,
                                imports=imports),
                       iterfield=['left', 'right'],
                       name="combiner")
    wf.connect(samplerlh, 'out_file', combiner, 'left')
    wf.connect(samplerrh, 'out_file', combiner, 'right')
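
    # Hypothetical sketch of the `combine_hemi` helper wrapped above: stack
    # the lh/rh vertex time courses into one text file, prefixing each row
    # with an offset vertex index so the hemispheres stay distinguishable.
    def _combine_hemi_sketch(left, right):
        import os
        import numpy as np
        import nibabel as nb
        lh_data = nb.load(left).get_fdata().squeeze()
        rh_data = nb.load(right).get_fdata().squeeze()
        indices = np.vstack((1000000 + np.arange(lh_data.shape[0])[:, None],
                             2000000 + np.arange(rh_data.shape[0])[:, None]))
        all_data = np.hstack((indices, np.vstack((lh_data, rh_data))))
        out_file = os.path.abspath('combined_hemi_timeseries.txt')
        np.savetxt(out_file, all_data,
                   fmt=','.join(['%d'] + ['%.10f'] * (all_data.shape[1] - 1)))
        return out_file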

    # Sample the time series file for each subcortical roi
    ts2txt = MapNode(Function(input_names=['timeseries_file', 'label_file',
                                           'indices'],
                              output_names=['out_file'],
                              function=extract_subrois,
                              imports=imports),
                     iterfield=['timeseries_file'],
                     name='getsubcortts')
    ts2txt.inputs.indices = [8] + list(range(10, 14)) + [17, 18, 26, 47] +\
        list(range(49, 55)) + [58]
    ts2txt.inputs.label_file = \
        os.path.abspath(('OASIS-TRT-20_jointfusion_DKT31_CMA_labels_in_MNI152_'
                         '2mm_v2.nii.gz'))
    wf.connect(maskts, 'out_file', ts2txt, 'timeseries_file')
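
    # Hypothetical sketch of the `extract_subrois` helper wrapped above:
    # average the time series over the voxels of each requested label and
    # write one row per ROI. Assumes the label file is on the same grid as
    # the (target-space) time series.
    def _extract_subrois_sketch(timeseries_file, label_file, indices):
        import os
        import numpy as np
        import nibabel as nb
        data = nb.load(timeseries_file).get_fdata()
        rois = nb.load(label_file).get_fdata().astype(int)
        out_file = os.path.join(os.getcwd(), 'subcortical_timeseries.txt')
        with open(out_file, 'wt') as fp:
            for fsindex in indices:
                ijk = np.nonzero(rois == fsindex)
                ts = data[ijk].mean(axis=0)  # mean over ROI voxels per volume
                fp.write('%d,' % fsindex +
                         ','.join(['%.10f' % val for val in ts]) + '\n')
        return out_file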

    ######

    substitutions = [('_target_subject_', ''),
                     ('_filtermotart_cleaned_bp_trans_masked', ''),
                     ('_filtermotart_cleaned_bp', ''),
                     ]
    substitutions += [("_smooth%d" % i, "") for i in range(11)[::-1]]
    substitutions += [("_ts_masker%d" % i, "") for i in range(11)[::-1]]
    substitutions += [("_getsubcortts%d" % i, "") for i in range(11)[::-1]]
    substitutions += [("_combiner%d" % i, "") for i in range(11)[::-1]]
    substitutions += [("_filtermotion%d" % i, "") for i in range(11)[::-1]]
    substitutions += [("_filter_noise_nosmooth%d" % i, "") for i in range(11)[::-1]]
    substitutions += [("_makecompcorfilter%d" % i, "") for i in range(11)[::-1]]
    substitutions += [("_get_aparc_tsnr%d/" % i, "run%d_" % (i + 1)) for i in range(11)[::-1]]

    substitutions += [("T1_out_brain_pve_0_maths_warped", "compcor_csf"),
                      ("T1_out_brain_pve_1_maths_warped", "compcor_gm"),
                      ("T1_out_brain_pve_2_maths_warped", "compcor_wm"),
                      ("output_warped_image_maths", "target_brain_mask"),
                      ("median_brain_mask", "native_brain_mask"),
                      ("corr_", "")]

    regex_subs = [('_combiner.*/sar', '/smooth/'),
                  ('_combiner.*/ar', '/unsmooth/'),
                  ('_aparc_ts.*/sar', '/smooth/'),
                  ('_aparc_ts.*/ar', '/unsmooth/'),
                  ('_getsubcortts.*/sar', '/smooth/'),
                  ('_getsubcortts.*/ar', '/unsmooth/'),
                  ('series/sar', 'series/smooth/'),
                  ('series/ar', 'series/unsmooth/'),
                  ('_inverse_transform./', ''),
                  ]
    # Save the relevant data into an output directory
    datasink = Node(interface=DataSink(), name="datasink")
    datasink.inputs.base_directory = sink_directory
    datasink.inputs.container = subject_id
    datasink.inputs.substitutions = substitutions
    datasink.inputs.regexp_substitutions = regex_subs  # (r'(/_.*(\d+/))', r'/run\2')
    wf.connect(realign, 'par_file', datasink, 'resting.qa.motion')
    wf.connect(art, 'norm_files', datasink, 'resting.qa.art.@norm')
    wf.connect(art, 'intensity_files', datasink, 'resting.qa.art.@intensity')
    wf.connect(art, 'outlier_files', datasink, 'resting.qa.art.@outlier_files')
    wf.connect(registration, 'outputspec.segmentation_files', datasink, 'resting.mask_files')
    wf.connect(registration, 'outputspec.anat2target', datasink, 'resting.qa.ants')
    wf.connect(mask, 'mask_file', datasink, 'resting.mask_files.@brainmask')
    wf.connect(mask_target, 'out_file', datasink, 'resting.mask_files.target')
    wf.connect(filter1, 'out_f', datasink, 'resting.qa.compmaps.@mc_F')
    wf.connect(filter1, 'out_pf', datasink, 'resting.qa.compmaps.@mc_pF')
    wf.connect(filter2, 'out_f', datasink, 'resting.qa.compmaps')
    wf.connect(filter2, 'out_pf', datasink, 'resting.qa.compmaps.@p')
    wf.connect(registration, 'outputspec.min_cost_file', datasink, 'resting.qa.mincost')
    wf.connect(tsnr, 'tsnr_file', datasink, 'resting.qa.tsnr.@map')
    wf.connect([(get_roi_tsnr, datasink, [('avgwf_txt_file', 'resting.qa.tsnr'),
                                          ('summary_file', 'resting.qa.tsnr.@summary')])])

    wf.connect(bandpass, 'out_files', datasink, 'resting.timeseries.@bandpassed')
    wf.connect(smooth, 'out_file', datasink, 'resting.timeseries.@smoothed')
    wf.connect(createfilter1, 'out_files',
               datasink, 'resting.regress.@regressors')
    wf.connect(createfilter2, 'components_file',
               datasink, 'resting.regress.@compcorr')
    wf.connect(maskts, 'out_file', datasink, 'resting.timeseries.target')
    wf.connect(sampleaparc, 'summary_file',
               datasink, 'resting.parcellations.aparc')
    wf.connect(sampleaparc, 'avgwf_txt_file',
               datasink, 'resting.parcellations.aparc.@avgwf')
    wf.connect(ts2txt, 'out_file',
               datasink, 'resting.parcellations.grayo.@subcortical')

    datasink2 = Node(interface=DataSink(), name="datasink2")
    datasink2.inputs.base_directory = sink_directory
    datasink2.inputs.container = subject_id
    datasink2.inputs.substitutions = substitutions
    datasink2.inputs.regexp_substitutions = regex_subs  # (r'(/_.*(\d+/))', r'/run\2')
    wf.connect(combiner, 'out_file',
               datasink2, 'resting.parcellations.grayo.@surface')
    return wf
Example #38
File: model.py Project: toddt/lyman
def create_timeseries_model_workflow(name="model", exp_info=None):

    # Default experiment parameters for generating graph image, testing, etc.
    if exp_info is None:
        exp_info = default_experiment_parameters()

    # Define constant inputs
    inputs = ["design_file", "realign_file", "artifact_file", "timeseries"]

    # Possibly add the regressor file to the inputs
    if exp_info["regressor_file"] is not None:
        inputs.append("regressor_file")

    # Define the workflow inputs
    inputnode = Node(IdentityInterface(inputs), "inputs")

    # Set up the experimental design
    modelsetup = MapNode(Function(["exp_info",
                                   "design_file",
                                   "realign_file",
                                   "artifact_file",
                                   "regressor_file",
                                   "run"],
                                  ["design_matrix_file",
                                   "contrast_file",
                                   "design_matrix_pkl",
                                   "report"],
                                  setup_model,
                                  imports),
                          ["realign_file", "artifact_file", "run"],
                          "modelsetup")
    modelsetup.inputs.exp_info = exp_info
    if exp_info["regressor_file"] is None:
        modelsetup.inputs.regressor_file = None

    # Use film_gls to estimate the timeseries model
    modelestimate = MapNode(fsl.FILMGLS(smooth_autocorr=True,
                                        mask_size=5,
                                        threshold=1000),
                            ["design_file", "in_file"],
                            "modelestimate")

    # Run the contrast estimation routine
    contrastestimate = MapNode(fsl.ContrastMgr(),
                               ["tcon_file",
                                "dof_file",
                                "corrections",
                                "param_estimates",
                                "sigmasquareds"],
                               "contrastestimate")

    calcrsquared = MapNode(Function(["design_matrix_pkl",
                                     "timeseries",
                                     "pe_files"],
                                    ["r2_files",
                                     "ss_files"],
                                    compute_rsquareds,
                                    imports),
                           ["design_matrix_pkl",
                            "timeseries",
                            "pe_files"],
                           "calcrsquared")
    calcrsquared.plugin_args = dict(qsub_args="-l h_vmem=8G")

    # Save the experiment info for this run
    dumpjson = MapNode(Function(["exp_info", "timeseries"], ["json_file"],
                                dump_exp_info, imports),
                    "timeseries",
                    "dumpjson")
    dumpjson.inputs.exp_info = exp_info

    # Report on the results of the model
    modelreport = MapNode(Function(["timeseries",
                                    "sigmasquareds_file",
                                    "zstat_files",
                                    "r2_files"],
                                   ["report"],
                                   report_model,
                                   imports),
                          ["timeseries", "sigmasquareds_file",
                           "zstat_files", "r2_files"],
                          "modelreport")

    # Define the workflow outputs
    outputnode = Node(IdentityInterface(["results",
                                         "copes",
                                         "varcopes",
                                         "zstats",
                                         "r2_files",
                                         "ss_files",
                                         "report",
                                         "design_mat",
                                         "contrast_mat",
                                         "design_pkl",
                                         "design_report",
                                         "json_file"]),
                      "outputs")

    # Define the workflow and connect the nodes
    model = Workflow(name=name)
    model.connect([
        (inputnode, modelsetup,
            [("design_file", "design_file"),
             ("realign_file", "realign_file"),
             ("artifact_file", "artifact_file"),
             (("timeseries", run_indices), "run")]),
        (inputnode, modelestimate,
            [("timeseries", "in_file")]),
        (inputnode, dumpjson,
            [("timeseries", "timeseries")]),
        (modelsetup, modelestimate,
            [("design_matrix_file", "design_file")]),
        (modelestimate, contrastestimate,
            [("dof_file", "dof_file"),
             ("corrections", "corrections"),
             ("param_estimates", "param_estimates"),
             ("sigmasquareds", "sigmasquareds")]),
        (modelsetup, contrastestimate,
            [("contrast_file", "tcon_file")]),
        (modelsetup, calcrsquared,
            [("design_matrix_pkl", "design_matrix_pkl")]),
        (inputnode, calcrsquared,
            [("timeseries", "timeseries")]),
        (modelestimate, calcrsquared,
            [("param_estimates", "pe_files")]),
        (inputnode, modelreport,
            [("timeseries", "timeseries")]),
        (modelestimate, modelreport,
            [("sigmasquareds", "sigmasquareds_file")]),
        (contrastestimate, modelreport,
            [("zstats", "zstat_files")]),
        (calcrsquared, modelreport,
            [("r2_files", "r2_files")]),
        (modelsetup, outputnode,
            [("design_matrix_file", "design_mat"),
             ("contrast_file", "contrast_mat"),
             ("design_matrix_pkl", "design_pkl"),
             ("report", "design_report")]),
        (dumpjson, outputnode,
            [("json_file", "json_file")]),
        (modelestimate, outputnode,
            [("results_dir", "results")]),
        (contrastestimate, outputnode,
            [("copes", "copes"),
             ("varcopes", "varcopes"),
             ("zstats", "zstats")]),
        (calcrsquared, outputnode,
            [("r2_files", "r2_files"),
             ("ss_files", "ss_files")]),
        (modelreport, outputnode,
            [("report", "report")]),
        ])

    if exp_info["regressor_file"] is not None:
        model.connect([
            (inputnode, modelsetup,
                [("regressor_file", "regressor_file")])
                       ])

    return model, inputnode, outputnode
Example #39
def create_workflow(files,
                    subject_id,
                    n_vol=0,
                    despike=True,
                    TR=None,
                    slice_times=None,
                    slice_thickness=None,
                    fieldmap_images=[],
                    norm_threshold=1,
                    num_components=6,
                    vol_fwhm=None,
                    surf_fwhm=None,
                    lowpass_freq=-1,
                    highpass_freq=-1,
                    sink_directory=os.getcwd(),
                    FM_TEdiff=2.46,
                    FM_sigma=2,
                    FM_echo_spacing=.7,
                    target_subject=['fsaverage3', 'fsaverage4'],
                    name='resting'):

    wf = Workflow(name=name)

    # Skip starting volumes
    remove_vol = MapNode(fsl.ExtractROI(t_min=n_vol, t_size=-1),
                         iterfield=['in_file'],
                         name="remove_volumes")
    remove_vol.inputs.in_file = files

    # Run AFNI's despike. This node always runs; whether its output is fed to
    # realignment depends on the `despike` flag below
    despiker = MapNode(afni.Despike(outputtype='NIFTI_GZ'),
                       iterfield=['in_file'],
                       name='despike')
    #despiker.plugin_args = {'qsub_args': '-l nodes=1:ppn='}

    wf.connect(remove_vol, 'roi_file', despiker, 'in_file')

    # Run Nipy joint slice timing and realignment algorithm
    realign = Node(nipy.SpaceTimeRealigner(), name='realign')
    realign.inputs.tr = TR
    realign.inputs.slice_times = slice_times
    realign.inputs.slice_info = 2

    if despike:
        wf.connect(despiker, 'out_file', realign, 'in_file')
    else:
        wf.connect(remove_vol, 'roi_file', realign, 'in_file')

    # Compute TSNR on realigned data, regressing out polynomials up to order 2
    tsnr = MapNode(TSNR(regress_poly=2), iterfield=['in_file'], name='tsnr')
    wf.connect(realign, 'out_file', tsnr, 'in_file')

    # Compute the median image across runs
    calc_median = Node(Function(input_names=['in_files'],
                                output_names=['median_file'],
                                function=median,
                                imports=imports),
                       name='median')
    wf.connect(tsnr, 'detrended_file', calc_median, 'in_files')
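
    # Hypothetical sketch of the `median` helper wrapped above: take the
    # temporal median of each run, then average those medians across runs.
    def _median_sketch(in_files):
        import os
        import numpy as np
        import nibabel as nb
        from nipype.utils.filemanip import filename_to_list
        average = None
        for idx, filename in enumerate(filename_to_list(in_files)):
            img = nb.load(filename)
            data = np.median(img.get_fdata(), axis=3)
            average = data if average is None else average + data
        out_img = nb.Nifti1Image(average / (idx + 1.), img.affine, img.header)
        out_file = os.path.join(os.getcwd(), 'median.nii.gz')
        out_img.to_filename(out_file)
        return out_file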

    # Coregister the median to the surface
    register = Node(freesurfer.BBRegister(),
                    name='bbregister')
    register.inputs.subject_id = subject_id
    register.inputs.init = 'fsl'
    register.inputs.contrast_type = 't2'
    register.inputs.out_fsl_file = True
    register.inputs.epi_mask = True

    # Compute fieldmaps and unwarp using them
    if fieldmap_images:
        fieldmap = Node(interface=EPIDeWarp(), name='fieldmap_unwarp')
        fieldmap.inputs.tediff = FM_TEdiff
        fieldmap.inputs.esp = FM_echo_spacing
        fieldmap.inputs.sigma = FM_sigma
        fieldmap.inputs.mag_file = fieldmap_images[0]
        fieldmap.inputs.dph_file = fieldmap_images[1]
        wf.connect(calc_median, 'median_file', fieldmap, 'exf_file')

        dewarper = MapNode(interface=fsl.FUGUE(), iterfield=['in_file'],
                           name='dewarper')
        wf.connect(tsnr, 'detrended_file', dewarper, 'in_file')
        wf.connect(fieldmap, 'exf_mask', dewarper, 'mask_file')
        wf.connect(fieldmap, 'vsm_file', dewarper, 'shift_in_file')
        wf.connect(fieldmap, 'exfdw', register, 'source_file')
    else:
        wf.connect(calc_median, 'median_file', register, 'source_file')

    # Get the subject's freesurfer source directory
    fssource = Node(FreeSurferSource(),
                    name='fssource')
    fssource.inputs.subject_id = subject_id
    fssource.inputs.subjects_dir = os.environ['SUBJECTS_DIR']

    # Extract wm+csf and brain masks by eroding freesurfer labels, then
    # transform the masks into the space of the median image
    wmcsf = Node(freesurfer.Binarize(), name='wmcsfmask')
    mask = wmcsf.clone('anatmask')
    wmcsftransform = Node(freesurfer.ApplyVolTransform(inverse=True,
                                                       interp='nearest'),
                          name='wmcsftransform')
    wmcsftransform.inputs.subjects_dir = os.environ['SUBJECTS_DIR']
    wmcsf.inputs.wm_ven_csf = True
    wmcsf.inputs.match = [4, 5, 14, 15, 24, 31, 43, 44, 63]
    wmcsf.inputs.binary_file = 'wmcsf.nii.gz'
    wmcsf.inputs.erode = int(np.ceil(slice_thickness))
    wf.connect(fssource, ('aparc_aseg', get_aparc_aseg), wmcsf, 'in_file')
    if fieldmap_images:
        wf.connect(fieldmap, 'exf_mask', wmcsftransform, 'source_file')
    else:
        wf.connect(calc_median, 'median_file', wmcsftransform, 'source_file')
    wf.connect(register, 'out_reg_file', wmcsftransform, 'reg_file')
    wf.connect(wmcsf, 'binary_file', wmcsftransform, 'target_file')

    mask.inputs.binary_file = 'mask.nii.gz'
    mask.inputs.dilate = int(np.ceil(slice_thickness)) + 1
    mask.inputs.erode = int(np.ceil(slice_thickness))
    mask.inputs.min = 0.5
    wf.connect(fssource, ('aparc_aseg', get_aparc_aseg), mask, 'in_file')
    masktransform = wmcsftransform.clone("masktransform")
    if fieldmap_images:
        wf.connect(fieldmap, 'exf_mask', masktransform, 'source_file')
    else:
        wf.connect(calc_median, 'median_file', masktransform, 'source_file')
    wf.connect(register, 'out_reg_file', masktransform, 'reg_file')
    wf.connect(mask, 'binary_file', masktransform, 'target_file')

    # Compute Art outliers
    art = Node(interface=ArtifactDetect(use_differences=[True, False],
                                        use_norm=True,
                                        norm_threshold=norm_threshold,
                                        zintensity_threshold=3,
                                        parameter_source='NiPy',
                                        bound_by_brainmask=True,
                                        save_plot=False,
                                        mask_type='file'),
               name="art")
    if fieldmap_images:
        wf.connect(dewarper, 'unwarped_file', art, 'realigned_files')
    else:
        wf.connect(tsnr, 'detrended_file', art, 'realigned_files')
    wf.connect(realign, 'par_file',
               art, 'realignment_parameters')
    wf.connect(masktransform, 'transformed_file', art, 'mask_file')

    # Compute motion regressors
    motreg = Node(Function(input_names=['motion_params', 'order',
                                        'derivatives'],
                           output_names=['out_files'],
                           function=motion_regressors,
                           imports=imports),
                  name='getmotionregress')
    wf.connect(realign, 'par_file', motreg, 'motion_params')
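
    # Hypothetical sketch of the `motion_regressors` helper wrapped above:
    # expand the motion parameters with temporal derivatives (and optionally
    # polynomial powers), writing one regressor file per run.
    def _motion_regressors_sketch(motion_params, order=0, derivatives=1):
        import os
        import numpy as np
        from nipype.utils.filemanip import filename_to_list
        out_files = []
        for idx, filename in enumerate(filename_to_list(motion_params)):
            params = np.genfromtxt(filename)
            out_params = params
            for d in range(1, derivatives + 1):
                # pad with the first row so the derivative keeps its length
                cparams = np.vstack((np.repeat(params[0, :][None, :], d, axis=0),
                                     params))
                out_params = np.hstack((out_params,
                                        np.diff(cparams, d, axis=0)))
            for i in range(2, order + 1):
                out_params = np.hstack((out_params, np.power(params, i)))
            out_file = os.path.join(os.getcwd(),
                                    'motion_regressor%02d.txt' % idx)
            np.savetxt(out_file, out_params, fmt='%.10f')
            out_files.append(out_file)
        return out_files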

    # Create a filter to remove motion and art confounds
    createfilter1 = Node(Function(input_names=['motion_params', 'comp_norm',
                                               'outliers'],
                                  output_names=['out_files'],
                                  function=build_filter1,
                                  imports=imports),
                         name='makemotionbasedfilter')
    wf.connect(motreg, 'out_files', createfilter1, 'motion_params')
    wf.connect(art, 'norm_files', createfilter1, 'comp_norm')
    wf.connect(art, 'outlier_files', createfilter1, 'outliers')
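
    # Hypothetical sketch of the `build_filter1` helper wrapped above: per
    # run, stack the motion regressors, the composite-norm trace, and one
    # indicator ("spike") column per art outlier into a design matrix file.
    def _build_filter1_sketch(motion_params, comp_norm, outliers):
        import os
        import numpy as np
        from nipype.utils.filemanip import filename_to_list
        out_files = []
        for idx, filename in enumerate(filename_to_list(motion_params)):
            params = np.genfromtxt(filename)
            norm_val = np.genfromtxt(filename_to_list(comp_norm)[idx])
            out_params = np.hstack((params, norm_val[:, None]))
            try:
                outlier_val = np.genfromtxt(filename_to_list(outliers)[idx])
            except IOError:
                outlier_val = np.empty((0,))
            for index in np.atleast_1d(outlier_val).astype(int):
                column = np.zeros((out_params.shape[0], 1))
                column[index] = 1
                out_params = np.hstack((out_params, column))
            out_file = os.path.join(os.getcwd(),
                                    'filter_regressor%02d.txt' % idx)
            np.savetxt(out_file, out_params, fmt='%.10f')
            out_files.append(out_file)
        return out_files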

    # Filter the motion and art confounds
    filter1 = MapNode(fsl.GLM(out_res_name='timeseries.nii.gz',
                              demean=True),
                      iterfield=['in_file', 'design'],
                      name='filtermotion')
    if fieldmap_images:
        wf.connect(dewarper, 'unwarped_file', filter1, 'in_file')
    else:
        wf.connect(tsnr, 'detrended_file', filter1, 'in_file')
    wf.connect(createfilter1, 'out_files', filter1, 'design')
    wf.connect(masktransform, 'transformed_file', filter1, 'mask')

    # Create a filter to remove noise components based on white matter and CSF
    createfilter2 = MapNode(Function(input_names=['realigned_file', 'mask_file',
                                                  'num_components'],
                                     output_names=['out_files'],
                                     function=extract_noise_components,
                                     imports=imports),
                            iterfield=['realigned_file'],
                            name='makecompcorrfilter')
    createfilter2.inputs.num_components = num_components
    wf.connect(filter1, 'out_res', createfilter2, 'realigned_file')
    wf.connect(masktransform, 'transformed_file', createfilter2, 'mask_file')
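
    # Hypothetical sketch of the `extract_noise_components` helper wrapped
    # above (aCompCor-style): normalize voxel time courses inside the wm+csf
    # mask and keep the first `num_components` left singular vectors as
    # nuisance regressors.
    def _extract_noise_components_sketch(realigned_file, mask_file,
                                         num_components):
        import os
        import numpy as np
        import nibabel as nb
        mask = nb.load(mask_file).get_fdata()
        voxel_timecourses = nb.load(realigned_file).get_fdata()[mask > 0]
        voxel_timecourses[np.isnan(np.sum(voxel_timecourses, axis=1)), :] = 0
        X = voxel_timecourses.T  # time x voxels
        std = X.std(axis=0)
        std[std == 0] = 1.
        X = (X - X.mean(axis=0)) / std
        u, _, _ = np.linalg.svd(X, full_matrices=False)
        out_file = os.path.join(os.getcwd(), 'noise_components.txt')
        np.savetxt(out_file, u[:, :num_components], fmt='%.10f')
        # returned as a list to match the node's 'out_files' output
        return [out_file]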

    # Filter noise components
    filter2 = MapNode(fsl.GLM(out_res_name='timeseries_cleaned.nii.gz',
                              demean=True),
                      iterfield=['in_file', 'design'],
                      name='filtercompcorr')
    wf.connect(filter1, 'out_res', filter2, 'in_file')
    wf.connect(createfilter2, 'out_files', filter2, 'design')
    wf.connect(masktransform, 'transformed_file', filter2, 'mask')

    # Smooth the data using combined surface and volume smoothing
    smooth = MapNode(freesurfer.Smooth(),
                     iterfield=['in_file'],
                     name='smooth')
    smooth.inputs.proj_frac_avg = (0.1, 0.9, 0.1)
    if surf_fwhm is None:
        surf_fwhm = 5 * slice_thickness
    smooth.inputs.surface_fwhm = surf_fwhm
    if vol_fwhm is None:
        vol_fwhm = 2 * slice_thickness
    smooth.inputs.vol_fwhm = vol_fwhm
    wf.connect(filter2, 'out_res', smooth, 'in_file')
    wf.connect(register, 'out_reg_file', smooth, 'reg_file')

    # Bandpass filter the data
    bandpass = MapNode(fsl.TemporalFilter(),
                       iterfield=['in_file'],
                       name='bandpassfilter')
    if highpass_freq < 0:
        bandpass.inputs.highpass_sigma = -1
    else:
        bandpass.inputs.highpass_sigma = 1. / (2 * TR * highpass_freq)
    if lowpass_freq < 0:
        bandpass.inputs.lowpass_sigma = -1
    else:
        bandpass.inputs.lowpass_sigma = 1. / (2 * TR * lowpass_freq)
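    # Note: fsl.TemporalFilter (fslmaths -bptf) expects its cutoffs as sigma
    # in volumes, not Hz; the conversion used here is sigma = 1 / (2 * TR * f).
    # For example, TR = 2 s with a 0.01 Hz highpass gives
    # sigma = 1 / (2 * 2 * 0.01) = 25 volumes.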
    wf.connect(smooth, 'smoothed_file', bandpass, 'in_file')

    # Convert aparc to subject functional space
    aparctransform = wmcsftransform.clone("aparctransform")
    if fieldmap_images:
        wf.connect(fieldmap, 'exf_mask', aparctransform, 'source_file')
    else:
        wf.connect(calc_median, 'median_file', aparctransform, 'source_file')
    wf.connect(register, 'out_reg_file', aparctransform, 'reg_file')
    wf.connect(fssource, ('aparc_aseg', get_aparc_aseg),
               aparctransform, 'target_file')

    # Sample the average time series in aparc ROIs
    sampleaparc = MapNode(freesurfer.SegStats(avgwf_txt_file=True,
                                              default_color_table=True),
                          iterfield=['in_file'],
                          name='aparc_ts')
    sampleaparc.inputs.segment_id = ([8] + list(range(10, 14)) + [17, 18, 26, 47] +
                                     list(range(49, 55)) + [58] +
                                     list(range(1001, 1036)) +
                                     list(range(2001, 2036)))

    wf.connect(aparctransform, 'transformed_file',
               sampleaparc, 'segmentation_file')
    wf.connect(bandpass, 'out_file', sampleaparc, 'in_file')

    # Sample the time series onto the surface of the target subject. Performs
    # sampling into the left and right hemispheres separately
    target = Node(IdentityInterface(fields=['target_subject']), name='target')
    target.iterables = ('target_subject', filename_to_list(target_subject))

    samplerlh = MapNode(freesurfer.SampleToSurface(),
                        iterfield=['source_file'],
                        name='sampler_lh')
    samplerlh.inputs.sampling_method = "average"
    samplerlh.inputs.sampling_range = (0.1, 0.9, 0.1)
    samplerlh.inputs.sampling_units = "frac"
    samplerlh.inputs.interp_method = "trilinear"
    #samplerlh.inputs.cortex_mask = True
    samplerlh.inputs.out_type = 'niigz'
    samplerlh.inputs.subjects_dir = os.environ['SUBJECTS_DIR']

    samplerrh = samplerlh.clone('sampler_rh')

    samplerlh.inputs.hemi = 'lh'
    wf.connect(bandpass, 'out_file', samplerlh, 'source_file')
    wf.connect(register, 'out_reg_file', samplerlh, 'reg_file')
    wf.connect(target, 'target_subject', samplerlh, 'target_subject')

    samplerrh.set_input('hemi', 'rh')
    wf.connect(bandpass, 'out_file', samplerrh, 'source_file')
    wf.connect(register, 'out_reg_file', samplerrh, 'reg_file')
    wf.connect(target, 'target_subject', samplerrh, 'target_subject')

    # Combine left and right hemisphere to text file
    combiner = MapNode(Function(input_names=['left', 'right'],
                                output_names=['out_file'],
                                function=combine_hemi,
                                imports=imports),
                       iterfield=['left', 'right'],
                       name="combiner")
    wf.connect(samplerlh, 'out_file', combiner, 'left')
    wf.connect(samplerrh, 'out_file', combiner, 'right')

    # Compute registration between the subject's structural and MNI template
    # This is currently set to perform a very quick registration. However, the
    # registration can be made significantly more accurate for cortical
    # structures by increasing the number of iterations
    # All parameters are set using the example from:
    # https://github.com/stnava/ANTs/blob/master/Scripts/newAntsExample.sh
    reg = Node(ants.Registration(), name='antsRegister')
    reg.inputs.output_transform_prefix = "output_"
    reg.inputs.transforms = ['Translation', 'Rigid', 'Affine', 'SyN']
    reg.inputs.transform_parameters = [(0.1,), (0.1,), (0.1,), (0.2, 3.0, 0.0)]
    # reg.inputs.number_of_iterations = ([[10000, 111110, 11110]]*3 +
    #                                    [[100, 50, 30]])
    reg.inputs.number_of_iterations = [[100, 100, 100]] * 3 + [[100, 20, 10]]
    reg.inputs.dimension = 3
    reg.inputs.write_composite_transform = True
    reg.inputs.collapse_output_transforms = False
    reg.inputs.metric = ['Mattes'] * 3 + [['Mattes', 'CC']]
    reg.inputs.metric_weight = [1] * 3 + [[0.5, 0.5]]
    reg.inputs.radius_or_number_of_bins = [32] * 3 + [[32, 4]]
    reg.inputs.sampling_strategy = ['Regular'] * 3 + [[None, None]]
    reg.inputs.sampling_percentage = [0.3] * 3 + [[None, None]]
    reg.inputs.convergence_threshold = [1.e-8] * 3 + [-0.01]
    reg.inputs.convergence_window_size = [20] * 3 + [5]
    reg.inputs.smoothing_sigmas = [[4, 2, 1]] * 3 + [[1, 0.5, 0]]
    reg.inputs.sigma_units = ['vox'] * 4
    reg.inputs.shrink_factors = [[6, 4, 2]] + [[3, 2, 1]]*2 + [[4, 2, 1]]
    reg.inputs.use_estimate_learning_rate_once = [True] * 4
    reg.inputs.use_histogram_matching = [False] * 3 + [True]
    reg.inputs.output_warped_image = 'output_warped_image.nii.gz'
    reg.inputs.fixed_image = \
        os.path.abspath('OASIS-30_Atropos_template_in_MNI152_2mm.nii.gz')
    reg.inputs.num_threads = 4
    reg.plugin_args = {'qsub_args': '-l nodes=1:ppn=4'}

    # Convert T1.mgz to nifti for using with ANTS
    convert = Node(freesurfer.MRIConvert(out_type='niigz'), name='convert2nii')
    wf.connect(fssource, 'T1', convert, 'in_file')

    # Mask the T1.mgz file with the brain mask computed earlier
    maskT1 = Node(fsl.BinaryMaths(operation='mul'), name='maskT1')
    wf.connect(mask, 'binary_file', maskT1, 'operand_file')
    wf.connect(convert, 'out_file', maskT1, 'in_file')
    wf.connect(maskT1, 'out_file', reg, 'moving_image')

    # Convert the BBRegister transformation to ANTS ITK format
    convert2itk = MapNode(C3dAffineTool(),
                          iterfield=['transform_file', 'source_file'],
                          name='convert2itk')
    convert2itk.inputs.fsl2ras = True
    convert2itk.inputs.itk_transform = True
    wf.connect(register, 'out_fsl_file', convert2itk, 'transform_file')
    if fieldmap_images:
        wf.connect(fieldmap, 'exf_mask', convert2itk, 'source_file')
    else:
        wf.connect(calc_median, 'median_file', convert2itk, 'source_file')
    wf.connect(convert, 'out_file', convert2itk, 'reference_file')

    # Concatenate the affine and ants transforms into a list
    pickfirst = lambda x: x[0]
    merge = MapNode(Merge(2), iterfield=['in2'], name='mergexfm')
    wf.connect(convert2itk, 'itk_transform', merge, 'in2')
    wf.connect(reg, ('composite_transform', pickfirst), merge, 'in1')

    # Apply the combined transform to the time series file
    sample2mni = MapNode(ants.ApplyTransforms(),
                         iterfield=['input_image', 'transforms'],
                         name='sample2mni')
    sample2mni.inputs.input_image_type = 3
    sample2mni.inputs.interpolation = 'BSpline'
    sample2mni.inputs.invert_transform_flags = [False, False]
    sample2mni.inputs.reference_image = \
        os.path.abspath('OASIS-30_Atropos_template_in_MNI152_2mm.nii.gz')
    sample2mni.inputs.terminal_output = 'file'
    wf.connect(bandpass, 'out_file', sample2mni, 'input_image')
    wf.connect(merge, 'out', sample2mni, 'transforms')

    # Sample the time series file for each subcortical roi
    ts2txt = MapNode(Function(input_names=['timeseries_file', 'label_file',
                                           'indices'],
                              output_names=['out_file'],
                              function=extract_subrois,
                              imports=imports),
                     iterfield=['timeseries_file'],
                     name='getsubcortts')
    ts2txt.inputs.indices = [8] + list(range(10, 14)) + [17, 18, 26, 47] +\
                            list(range(49, 55)) + [58]
    ts2txt.inputs.label_file = \
        os.path.abspath(('OASIS-TRT-20_jointfusion_DKT31_CMA_labels_in_MNI152_'
                         '2mm.nii.gz'))
    wf.connect(sample2mni, 'output_image', ts2txt, 'timeseries_file')

    # Save the relevant data into an output directory
    datasink = Node(interface=DataSink(), name="datasink")
    datasink.inputs.base_directory = sink_directory
    datasink.inputs.container = subject_id
    datasink.inputs.substitutions = [('_target_subject_', '')]
    datasink.inputs.regexp_substitutions = (r'(/_.*(\d+/))', r'/run\2')
    wf.connect(despiker, 'out_file', datasink, 'resting.qa.despike')
    wf.connect(realign, 'par_file', datasink, 'resting.qa.motion')
    wf.connect(tsnr, 'tsnr_file', datasink, 'resting.qa.tsnr')
    wf.connect(tsnr, 'mean_file', datasink, 'resting.qa.tsnr.@mean')
    wf.connect(tsnr, 'stddev_file', datasink, 'resting.qa.@tsnr_stddev')
    if fieldmap_images:
        wf.connect(fieldmap, 'exf_mask', datasink, 'resting.reference')
    else:
        wf.connect(calc_median, 'median_file', datasink, 'resting.reference')
    wf.connect(art, 'norm_files', datasink, 'resting.qa.art.@norm')
    wf.connect(art, 'intensity_files', datasink, 'resting.qa.art.@intensity')
    wf.connect(art, 'outlier_files', datasink, 'resting.qa.art.@outlier_files')
    wf.connect(mask, 'binary_file', datasink, 'resting.mask')
    wf.connect(masktransform, 'transformed_file',
               datasink, 'resting.mask.@transformed_file')
    wf.connect(register, 'out_reg_file', datasink, 'resting.registration.bbreg')
    wf.connect(reg, ('composite_transform', pickfirst),
               datasink, 'resting.registration.ants')
    wf.connect(register, 'min_cost_file',
               datasink, 'resting.qa.bbreg.@mincost')
    wf.connect(smooth, 'smoothed_file', datasink, 'resting.timeseries.fullpass')
    wf.connect(bandpass, 'out_file', datasink, 'resting.timeseries.bandpassed')
    wf.connect(sample2mni, 'output_image', datasink, 'resting.timeseries.mni')
    wf.connect(createfilter1, 'out_files',
               datasink, 'resting.regress.@regressors')
    wf.connect(createfilter2, 'out_files',
               datasink, 'resting.regress.@compcorr')
    wf.connect(sampleaparc, 'summary_file',
               datasink, 'resting.parcellations.aparc')
    wf.connect(sampleaparc, 'avgwf_txt_file',
               datasink, 'resting.parcellations.aparc.@avgwf')
    wf.connect(ts2txt, 'out_file',
               datasink, 'resting.parcellations.grayo.@subcortical')
    datasink2 = Node(interface=DataSink(), name="datasink2")
    datasink2.inputs.base_directory = sink_directory
    datasink2.inputs.container = subject_id
    datasink2.inputs.substitutions = [('_target_subject_', '')]
    datasink2.inputs.regexp_substitutions = (r'(/_.*(\d+/))', r'/run\2')
    wf.connect(combiner, 'out_file',
               datasink2, 'resting.parcellations.grayo.@surface')
    return wf
Example #40
File: model.py Project: boydmeredith/lyman
def create_timeseries_model_workflow(name="model", exp_info=None):

    # Default experiment parameters for generating graph image, testing, etc.
    if exp_info is None:
        exp_info = lyman.default_experiment_parameters()

    # Define constant inputs
    inputs = ["realign_file", "artifact_file", "timeseries"]

    # Possibly add the design and regressor files to the inputs
    if exp_info["design_name"] is not None:
        inputs.append("design_file")
    if exp_info["regressor_file"] is not None:
        inputs.append("regressor_file")

    # Define the workflow inputs
    inputnode = Node(IdentityInterface(inputs), "inputs")

    # Set up the experimental design
    modelsetup = MapNode(ModelSetup(exp_info=exp_info),
                         ["timeseries", "realign_file", "artifact_file"],
                         "modelsetup")

    # For some nodes, make it possible to request extra memory
    mem_request = {"qsub_args": "-l h_vmem=%dG" % exp_info["memory_request"]}

    # Use film_gls to estimate the timeseries model
    modelestimate = MapNode(fsl.FILMGLS(smooth_autocorr=True,
                                        mask_size=5,
                                        threshold=100),
                            ["design_file", "in_file"],
                            "modelestimate")
    modelestimate.plugin_args = mem_request

    # Run the contrast estimation routine
    contrastestimate = MapNode(fsl.ContrastMgr(),
                               ["tcon_file",
                                "dof_file",
                                "corrections",
                                "param_estimates",
                                "sigmasquareds"],
                               "contrastestimate")
    contrastestimate.plugin_args = mem_request

    # Compute summary statistics about the model fit
    modelsummary = MapNode(ModelSummary(),
                           ["design_matrix_pkl",
                            "timeseries",
                            "pe_files"],
                           "modelsummary")
    modelsummary.plugin_args = mem_request

    # Save the experiment info for this run
    saveparams = MapNode(SaveParameters(exp_info=exp_info),
                         "in_file", "saveparams")

    # Report on the results of the model
    # Note: see below for a conditional iterfield
    modelreport = MapNode(ModelReport(),
                          ["timeseries", "sigmasquareds_file",
                           "tsnr_file", "r2_files"],
                          "modelreport")

    # Define the workflow outputs
    outputnode = Node(IdentityInterface(["results",
                                         "copes",
                                         "varcopes",
                                         "zstats",
                                         "r2_files",
                                         "ss_files",
                                         "tsnr_file",
                                         "report",
                                         "design_mat",
                                         "contrast_mat",
                                         "design_pkl",
                                         "design_report",
                                         "json_file"]),
                      "outputs")

    # Define the workflow and connect the nodes
    model = Workflow(name=name)
    model.connect([
        (inputnode, modelsetup,
            [("realign_file", "realign_file"),
             ("artifact_file", "artifact_file"),
             ("timeseries", "timeseries")]),
        (inputnode, modelestimate,
            [("timeseries", "in_file")]),
        (inputnode, saveparams,
            [("timeseries", "in_file")]),
        (modelsetup, modelestimate,
            [("design_matrix_file", "design_file")]),
        (modelestimate, contrastestimate,
            [("dof_file", "dof_file"),
             ("corrections", "corrections"),
             ("param_estimates", "param_estimates"),
             ("sigmasquareds", "sigmasquareds")]),
        (modelsetup, contrastestimate,
            [("contrast_file", "tcon_file")]),
        (modelsetup, modelsummary,
            [("design_matrix_pkl", "design_matrix_pkl")]),
        (inputnode, modelsummary,
            [("timeseries", "timeseries")]),
        (modelestimate, modelsummary,
            [("param_estimates", "pe_files")]),
        (inputnode, modelreport,
            [("timeseries", "timeseries")]),
        (modelestimate, modelreport,
            [("sigmasquareds", "sigmasquareds_file")]),
        (modelsummary, modelreport,
            [("r2_files", "r2_files"),
             ("tsnr_file", "tsnr_file")]),
        (modelsetup, outputnode,
            [("design_matrix_file", "design_mat"),
             ("contrast_file", "contrast_mat"),
             ("design_matrix_pkl", "design_pkl"),
             ("report", "design_report")]),
        (saveparams, outputnode,
            [("json_file", "json_file")]),
        (modelestimate, outputnode,
            [("results_dir", "results")]),
        (contrastestimate, outputnode,
            [("copes", "copes"),
             ("varcopes", "varcopes"),
             ("zstats", "zstats")]),
        (modelsummary, outputnode,
            [("r2_files", "r2_files"),
             ("ss_files", "ss_files"),
             ("tsnr_file", "tsnr_file")]),
        (modelreport, outputnode,
            [("out_files", "report")]),
        ])

    if exp_info["design_name"] is not None:
        model.connect(inputnode, "design_file",
                      modelsetup, "design_file")
    if exp_info["regressor_file"] is not None:
        model.connect(inputnode, "regressor_file",
                      modelsetup, "regressor_file")
    if exp_info["contrasts"]:
        model.connect(contrastestimate, "zstats",
                      modelreport, "zstat_files")
        modelreport.iterfield.append("zstat_files")

    return model, inputnode, outputnode