def test_compute_avg_wf():
    from nipype import Workflow

    nnodes = 1
    work_dir = 'test_ca_wf_work'
    chunk = chunks
    delay = 0
    benchmark_dir = None
    benchmark = False
    cli = True

    wf = Workflow('test_ca_wf')
    wf.base_dir = work_dir

    inc_1, ca_1 = ca.computed_avg_node('ca_bb',
                                       nnodes, work_dir,
                                       chunk=chunk,
                                       delay=delay,
                                       benchmark_dir=benchmark_dir,
                                       benchmark=benchmark,
                                       cli=cli)
        
    wf.add_nodes([inc_1])

    wf.connect([(inc_1, ca_1, [('inc_chunk', 'chunks')])])
    nodename = 'inc_2_test'
    inc_2, ca_2 = ca.computed_avg_node(nodename, nnodes, work_dir, delay=delay,
                                       benchmark_dir=benchmark_dir,
                                       benchmark=benchmark, cli=cli)

    wf.connect([(ca_1, inc_2, [('avg_chunk', 'avg')])])
    wf.connect([(inc_1, inc_2, [('inc_chunk', 'chunk')])])
    wf_out = wf.run('SLURM',
                    plugin_args={
                      'template': 'benchmark_scripts/nipype_kmeans_template.sh'
                    })

    node_names = [i.name for i in wf_out.nodes()]
    result_dict = dict(zip(node_names, wf_out.nodes()))
    saved_chunks = (result_dict['ca1_{0}'.format(nodename)]
                                 .result
                                 .outputs
                                 .inc_chunk)

    results = [i for c in saved_chunks for i in c]
    
    im_1 = nib.load(chunks[0]).get_data()
    im_3 = nib.load(chunks[1]).get_data()

    avg = ((im_1 + 1) + (im_3 + 1)) / 2

    im_1 = im_1 + 1 + avg + 1
    im_3 = im_3 + 1 + avg + 1

    for i in results:
        assert op.isfile(i)
        if '1' in i:
            assert np.array_equal(nib.load(i).get_data(), im_1)
        else:
            assert np.array_equal(nib.load(i).get_data(), im_3)
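For quick local debugging, the same workflow can also be executed with nipype's MultiProc plugin instead of SLURM; a minimal sketch, assuming the `wf` built above (replacing the `wf.run('SLURM', ...)` call):

# Hypothetical local run; 'MultiProc' and 'n_procs' are standard nipype plugin options.
wf_out = wf.run(plugin='MultiProc', plugin_args={'n_procs': 2})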
Example #2
def test_serial_input():
    cwd = os.getcwd()
    wd = mkdtemp()
    os.chdir(wd)
    from nipype import MapNode, Function, Workflow
    def func1(in1):
        return in1
    n1 = MapNode(Function(input_names=['in1'],
                          output_names=['out'],
                          function=func1),
                 iterfield=['in1'],
                 name='n1')
    n1.inputs.in1 = [1,2,3]


    w1 = Workflow(name='test')
    w1.base_dir = wd
    w1.add_nodes([n1])
    # set local check
    w1.config['execution'] = {'stop_on_first_crash': 'true',
                              'local_hash_check': 'true',
                              'crashdump_dir': wd}

    # test output of num_subnodes method when serial is default (False)
    yield assert_equal, n1.num_subnodes(), len(n1.inputs.in1)

    # test running the workflow on default conditions
    error_raised = False
    try:
        w1.run(plugin='MultiProc')
    except Exception as e:
        pe.logger.info('Exception: %s' % str(e))
        error_raised = True
Example #3
def init_base_wf(opts: ArgumentParser, layout: BIDSLayout, run_uuid: str,
                 subject_list: list, work_dir: str, output_dir: str):
    workflow = Workflow(name='atlasTransform_wf')
    workflow.base_dir = opts.work_dir

    reportlets_dir = os.path.join(opts.work_dir, 'reportlets')
    for subject_id in subject_list:
        single_subject_wf = init_single_subject_wf(
            opts=opts,
            layout=layout,
            run_uuid=run_uuid,
            work_dir=str(work_dir),
            output_dir=str(output_dir),
            name="single_subject_" + subject_id + "_wf",
            subject_id=subject_id,
            reportlets_dir=reportlets_dir,
        )

        single_subject_wf.config['execution']['crashdump_dir'] = (os.path.join(
            output_dir, "atlasTransform", "sub-" + subject_id, 'log',
            run_uuid))
        for node in single_subject_wf._get_all_nodes():
            node.config = deepcopy(single_subject_wf.config)

        workflow.add_nodes([single_subject_wf])

    return workflow
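A hypothetical invocation of this builder, assuming `opts`, `layout`, and `run_uuid` were already constructed by the caller (all paths and subject IDs below are placeholders):

# opts: argparse namespace, layout: BIDSLayout, run_uuid: str -- assumed to exist.
wf = init_base_wf(opts=opts, layout=layout, run_uuid=run_uuid,
                  subject_list=['01', '02'],
                  work_dir='/tmp/atlasTransform_work',
                  output_dir='/data/derivatives')
wf.run(plugin='MultiProc', plugin_args={'n_procs': 4})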
Example #4
def test_serial_input(tmpdir):
    tmpdir.chdir()
    wd = os.getcwd()
    from nipype import MapNode, Function, Workflow

    def func1(in1):
        return in1
    n1 = MapNode(Function(input_names=['in1'],
                          output_names=['out'],
                          function=func1),
                 iterfield=['in1'],
                 name='n1')
    n1.inputs.in1 = [1, 2, 3]

    w1 = Workflow(name='test')
    w1.base_dir = wd
    w1.add_nodes([n1])
    # set local check
    w1.config['execution'] = {'stop_on_first_crash': 'true',
                              'local_hash_check': 'true',
                              'crashdump_dir': wd,
                              'poll_sleep_duration': 2}

    # test output of num_subnodes method when serial is default (False)
    assert n1.num_subnodes() == len(n1.inputs.in1)

    # test running the workflow on default conditions
    w1.run(plugin='MultiProc')

    # test output of num_subnodes method when serial is True
    n1._serial = True
    assert n1.num_subnodes() == 1

    # test running the workflow on serial conditions
    w1.run(plugin='MultiProc')
Example #5
def increment_wf(chunk, delay, benchmark, benchmark_dir, cli, wf_name, avg,
                 work_dir):
    from nipype import Workflow
    from nipype_inc import increment_node

    wf = Workflow(wf_name)
    wf.base_dir = work_dir
    idx = 0
    node_names = []

    if any(isinstance(i, list) for i in chunk):
        chunk = [i for c in chunk for i in c]

    print('chunks', chunk)
    for fn in chunk:
        inc1_nname = 'inc_wf_{}'.format(idx)
        inc_1 = increment_node(inc1_nname, fn, delay, benchmark_dir, benchmark,
                               cli, avg)
        wf.add_nodes([inc_1])

        node_names.append(inc1_nname)
        idx += 1

    wf_out = wf.run('MultiProc')
    node_names = [i.name for i in wf_out.nodes()]
    result_dict = dict(zip(node_names, wf_out.nodes()))
    inc_chunks = ([
        result_dict['inc_wf_{}'.format(i)].result.outputs.inc_chunk
        for i in range(0, len(chunk))
    ])
    return inc_chunks
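A hedged usage sketch for this helper (the chunk paths and working directory are placeholders; the function depends on the project's `nipype_inc` module being importable):

# Returns one list of incremented chunk paths per input file.
inc_chunks = increment_wf(chunk=['/data/chunks/c0.nii.gz', '/data/chunks/c1.nii.gz'],
                          delay=0, benchmark=False, benchmark_dir=None,
                          cli=False, wf_name='inc_iter_0', avg=None,
                          work_dir='/tmp/inc_work')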
Example #6
def test_serial_input():
    cwd = os.getcwd()
    wd = mkdtemp()
    os.chdir(wd)
    from nipype import MapNode, Function, Workflow

    def func1(in1):
        return in1

    n1 = MapNode(Function(input_names=['in1'],
                          output_names=['out'],
                          function=func1),
                 iterfield=['in1'],
                 name='n1')
    n1.inputs.in1 = [1, 2, 3]

    w1 = Workflow(name='test')
    w1.base_dir = wd
    w1.add_nodes([n1])
    # set local check
    w1.config['execution'] = {
        'stop_on_first_crash': 'true',
        'local_hash_check': 'true',
        'crashdump_dir': wd,
        'poll_sleep_duration': 2
    }

    # test output of num_subnodes method when serial is default (False)
    yield assert_equal, n1.num_subnodes(), len(n1.inputs.in1)

    # test running the workflow on default conditions
    error_raised = False
    try:
        w1.run(plugin='MultiProc')
    except Exception as e:
        from nipype.pipeline.engine.base import logger
        logger.info('Exception: %s' % str(e))
        error_raised = True
    yield assert_false, error_raised

    # test output of num_subnodes method when serial is True
    n1._serial = True
    yield assert_equal, n1.num_subnodes(), 1

    # test running the workflow on serial conditions
    error_raised = False
    try:
        w1.run(plugin='MultiProc')
    except Exception as e:
        from nipype.pipeline.engine.base import logger
        logger.info('Exception: %s' % str(e))
        error_raised = True
    yield assert_false, error_raised

    os.chdir(cwd)
    rmtree(wd)
Example #7
def define_workflow(subject_list, run_list, experiment_dir, output_dir):
    """run the smooth workflow given subject and runs"""
    # ExtractROI - skip dummy scans
    extract = Node(ExtractROI(t_min=4, t_size=-1, output_type='NIFTI'),
                   name="extract")

    # Smooth - image smoothing
    smooth = Node(Smooth(fwhm=[8, 8, 8]), name="smooth")

    # Mask - applying mask to smoothed
    # mask_func = Node(ApplyMask(output_type='NIFTI'),
    # name="mask_func")

    # Infosource - a function free node to iterate over the list of subject names
    infosource = Node(IdentityInterface(fields=['subject_id', 'run_num']),
                      name="infosource")
    infosource.iterables = [('subject_id', subject_list),
                            ('run_num', run_list)]

    # SelectFiles - to grab the data (alternative to DataGrabber)
    func_file = opj(
        'sub-{subject_id}', 'func',
        'sub-{subject_id}_task-tsl_run-{run_num}_space-MNI152NLin2009cAsym_desc-preproc_bold.nii.gz'
    )
    templates = {'func': func_file}
    selectfiles = Node(SelectFiles(templates, base_directory=data_dir),
                       name="selectfiles")

    # Datasink - creates output folder for important outputs
    datasink = Node(DataSink(base_directory=experiment_dir,
                             container=output_dir),
                    name="datasink")

    ## Use the following DataSink output substitutions
    substitutions = [('_subject_id_', 'sub-'), ('ssub', 'sub'),
                     ('_space-MNI152NLin2009cAsym_desc-preproc_', '_fwhm-8_'),
                     ('_fwhm_', ''), ('_roi', '')]
    substitutions += [('_run_num_%s' % r, '') for r in run_list]
    datasink.inputs.substitutions = substitutions

    # Create a preprocessing workflow
    preproc = Workflow(name='preproc')
    preproc.base_dir = opj(experiment_dir, working_dir)

    # Connect all components of the preprocessing workflow (spm smooth)
    preproc.connect([(infosource, selectfiles, [('subject_id', 'subject_id'),
                                                ('run_num', 'run_num')]),
                     (selectfiles, extract, [('func', 'in_file')]),
                     (extract, smooth, [('roi_file', 'in_files')]),
                     (smooth, datasink, [('smoothed_files', 'preproc.@smooth')
                                         ])])
    return preproc
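A hedged usage sketch; note that `data_dir` and `working_dir` are module-level globals the function relies on, and the values below are placeholders:

preproc = define_workflow(subject_list=['001', '002'], run_list=['1', '2'],
                          experiment_dir='/data/experiment',
                          output_dir='datasink')
preproc.run('MultiProc', plugin_args={'n_procs': 4})  # run the returned workflow locally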
Example #8
def test_serial_input(tmpdir):
    wd = str(tmpdir)
    os.chdir(wd)
    from nipype import MapNode, Function, Workflow

    def func1(in1):
        return in1
    n1 = MapNode(Function(input_names=['in1'],
                          output_names=['out'],
                          function=func1),
                 iterfield=['in1'],
                 name='n1')
    n1.inputs.in1 = [1, 2, 3]

    w1 = Workflow(name='test')
    w1.base_dir = wd
    w1.add_nodes([n1])
    # set local check
    w1.config['execution'] = {'stop_on_first_crash': 'true',
                              'local_hash_check': 'true',
                              'crashdump_dir': wd,
                              'poll_sleep_duration': 2}

    # test output of num_subnodes method when serial is default (False)
    assert n1.num_subnodes() == len(n1.inputs.in1)

    # test running the workflow on default conditions
    error_raised = False
    try:
        w1.run(plugin='MultiProc')
    except Exception as e:
        from nipype.pipeline.engine.base import logger
        logger.info('Exception: %s' % str(e))
        error_raised = True
    assert not error_raised

    # test output of num_subnodes method when serial is True
    n1._serial = True
    assert n1.num_subnodes() == 1

    # test running the workflow on serial conditions
    error_raised = False
    try:
        w1.run(plugin='MultiProc')
    except Exception as e:
        from nipype.pipeline.engine.base import logger
        logger.info('Exception: %s' % str(e))
        error_raised = True

    assert not error_raised
Example #9
    def build_workflow(self, exp_dir, work_dir):
        coregwf = Workflow(name='coregwf')
        coregwf.base_dir = os.path.join(exp_dir, work_dir)
        coregwf.connect([(self.bet_anat, self.segmentation, [('out_file', 'in_files')]),
                         (self.segmentation, self.threshold, [(('partial_volume_files', get_wm),
                                                     'in_file')]),
                         (self.bet_anat, self.coreg_pre, [('out_file', 'reference')]),
                         (self.threshold, self.coreg_mi, [('out_file', 'wm_seg')]),
                         (self.coreg_pre, self.coreg_mi, [('out_matrix_file', 'in_matrix_file')]),
                         (self.coreg_mi, self.applywarp, [('out_matrix_file', 'in_matrix_file')]),
                         (self.bet_anat, self.applywarp, [('out_file', 'reference')]),
                         (self.coreg_mi, self.applywarp_mean, [('out_matrix_file', 'in_matrix_file')]),
                         (self.bet_anat, self.applywarp_mean, [('out_file', 'reference')]),
                         ])

        return coregwf
Example #10
def runNipypeBet(controller, subject_list, anatomical_id, proj_directory):

    infosource = Node(IdentityInterface(fields=['subject_id']),
                      name="infosource")
    infosource.iterables = [('subject_id', subject_list)]

    #anat_file = opj('{subject_id}','{subject_id}_{anatomical_id}.nii')
    separator = ''
    concat_words = ('{subject_id}_', anatomical_id, '.nii.gz')
    anat_file_name = separator.join(concat_words)

    if controller.b_radiological_convention.get() == True:
        anat_file = opj('{subject_id}', anat_file_name)
    else:
        anat_file = opj('{subject_id}', 'Intermediate_Files', 'Original_Files',
                        anat_file_name)

    templates = {'anat': anat_file}

    selectfiles = Node(SelectFiles(templates, base_directory=proj_directory),
                       name="selectfiles")

    skullstrip = Node(BET(robust=True,
                          frac=0.5,
                          vertical_gradient=0,
                          output_type='NIFTI_GZ'),
                      name="skullstrip")

    # Datasink - creates output folder for important outputs
    datasink = Node(DataSink(base_directory=proj_directory), name="datasink")

    wf_sub = Workflow(name="wf_sub")
    wf_sub.base_dir = proj_directory
    wf_sub.connect(infosource, "subject_id", selectfiles, "subject_id")
    wf_sub.connect(selectfiles, "anat", skullstrip, "in_file")
    wf_sub.connect(skullstrip, "out_file", datasink, "bet.@out_file")

    substitutions = [('%s_brain' % (anatomical_id), 'brain')]
    # Feed the substitution strings to the DataSink node
    datasink.inputs.substitutions = substitutions
    # Run the workflow again with the substitutions in place
    wf_sub.run(plugin='MultiProc')

    return 'brain'
Example #11
def test_mapnode_json():
    """Tests that mapnodes don't generate excess jsons
    """
    cwd = os.getcwd()
    wd = mkdtemp()
    os.chdir(wd)
    from nipype import MapNode, Function, Workflow

    def func1(in1):
        return in1 + 1

    n1 = MapNode(Function(input_names=['in1'],
                          output_names=['out'],
                          function=func1),
                 iterfield=['in1'],
                 name='n1')
    n1.inputs.in1 = [1]
    w1 = Workflow(name='test')
    w1.base_dir = wd
    w1.config['execution']['crashdump_dir'] = wd
    w1.add_nodes([n1])
    w1.run()
    n1.inputs.in1 = [2]
    w1.run()
    # should rerun
    n1.inputs.in1 = [1]
    eg = w1.run()

    node = eg.nodes()[0]
    outjson = glob(os.path.join(node.output_dir(), '_0x*.json'))
    yield assert_equal, len(outjson), 1

    # check that multiple json's don't trigger rerun
    with open(os.path.join(node.output_dir(), 'test.json'), 'wt') as fp:
        fp.write('dummy file')
    w1.config['execution'].update(**{'stop_on_first_rerun': True})
    error_raised = False
    try:
        w1.run()
    except:
        error_raised = True
    yield assert_false, error_raised
    os.chdir(cwd)
    rmtree(wd)
Example #12
def test_mapnode_json():
    """Tests that mapnodes don't generate excess jsons
    """
    cwd = os.getcwd()
    wd = mkdtemp()
    os.chdir(wd)
    from nipype import MapNode, Function, Workflow

    def func1(in1):
        return in1 + 1
    n1 = MapNode(Function(input_names=['in1'],
                          output_names=['out'],
                          function=func1),
                 iterfield=['in1'],
                 name='n1')
    n1.inputs.in1 = [1]
    w1 = Workflow(name='test')
    w1.base_dir = wd
    w1.config['execution']['crashdump_dir'] = wd
    w1.add_nodes([n1])
    w1.run()
    n1.inputs.in1 = [2]
    w1.run()
    # should rerun
    n1.inputs.in1 = [1]
    eg = w1.run()

    node = eg.nodes()[0]
    outjson = glob(os.path.join(node.output_dir(), '_0x*.json'))
    yield assert_equal, len(outjson), 1

    # check that multiple json's don't trigger rerun
    with open(os.path.join(node.output_dir(), 'test.json'), 'wt') as fp:
        fp.write('dummy file')
    w1.config['execution'].update(**{'stop_on_first_rerun': True})
    error_raised = False
    try:
        w1.run()
    except:
        error_raised = True
    yield assert_false, error_raised
    os.chdir(cwd)
    rmtree(wd)
Example #13
def save_classified_wf(partition,
                       assignments,
                       work_dir,
                       output_dir,
                       iteration,
                       benchmark_dir=None):

    from nipype import Workflow, Node, Function
    import nipype_kmeans as nk
    from time import time
    from benchmark import write_bench
    from socket import gethostname
    try:
        from threading import get_ident
    except Exception as e:
        from thread import get_ident

    start = time()

    res_wf = Workflow('km_classify')
    res_wf.base_dir = work_dir
    c_idx = 0
    for chunk in partition:
        cc = Node(Function(input_names=['img', 'assignments', 'out_dir'],
                           output_names=['out_file'],
                           function=nk.classify_chunks),
                  name='{0}cc_{1}'.format(iteration, c_idx))

        cc.inputs.img = chunk
        cc.inputs.assignments = assignments
        cc.inputs.out_dir = output_dir
        res_wf.add_nodes([cc])

        c_idx += 1

    res_wf.run(plugin='MultiProc')

    end = time()
    if benchmark_dir is not None:
        write_bench('save_classified', start, end, gethostname(), 'partition',
                    get_ident(), benchmark_dir)

    return ('Success', partition)
Example #14
def prepare_t1w(bids_dir,
                smriprep_dir,
                out_dir,
                wd_dir,
                crash_dir,
                subjects_sessions,
                n_cpu=1,
                omp_nthreads=1,
                run_wf=True,
                graph=False,
                smriprep06=False):
    _check_versions()
    export_version(out_dir)

    out_dir.mkdir(exist_ok=True, parents=True)

    wf = Workflow(name="meta_prepare_t1")
    wf.base_dir = wd_dir
    wf.config.remove_unnecessary_outputs = False
    wf.config["execution"]["crashdump_dir"] = crash_dir
    wf.config["monitoring"]["enabled"] = "true"

    for subject, session in subjects_sessions:
        name = f"anat_preproc_{subject}_{session}"
        single_ses_wf = init_single_ses_anat_preproc_wf(
            subject=subject,
            session=session,
            bids_dir=bids_dir,
            smriprep_dir=smriprep_dir,
            out_dir=out_dir,
            name=name,
            omp_nthreads=omp_nthreads,
            smriprep06=smriprep06)
        wf.add_nodes([single_ses_wf])
    if graph:
        wf.write_graph("workflow_graph.png", graph2use="exec")
        wf.write_graph("workflow_graph_c.png", graph2use="colored")
    if run_wf:
        wf.run(plugin='MultiProc', plugin_args={'n_procs': n_cpu})
Example #15
def test_serial_input(tmpdir):
    tmpdir.chdir()
    wd = os.getcwd()
    from nipype import MapNode, Function, Workflow

    def func1(in1):
        return in1

    n1 = MapNode(
        Function(input_names=["in1"], output_names=["out"], function=func1),
        iterfield=["in1"],
        name="n1",
    )
    n1.inputs.in1 = [1, 2, 3]

    w1 = Workflow(name="test")
    w1.base_dir = wd
    w1.add_nodes([n1])
    # set local check
    w1.config["execution"] = {
        "stop_on_first_crash": "true",
        "local_hash_check": "true",
        "crashdump_dir": wd,
        "poll_sleep_duration": 2,
    }

    # test output of num_subnodes method when serial is default (False)
    assert n1.num_subnodes() == len(n1.inputs.in1)

    # test running the workflow on default conditions
    w1.run(plugin="MultiProc")

    # test output of num_subnodes method when serial is True
    n1._serial = True
    assert n1.num_subnodes() == 1

    # test running the workflow on serial conditions
    w1.run(plugin="MultiProc")
Example #16
def save_wf(input_img, output_dir, it, benchmark, benchmark_dir, work_dir):
    from nipype import Workflow
    from nipype_inc import save_node

    wf = Workflow('save_wf')
    wf.base_dir = work_dir
    idx = 0
    for im in input_img:
        sn_name = 'sn_{}'.format(idx)
        sn = save_node(sn_name, im, output_dir, it, benchmark, benchmark_dir)
        wf.add_nodes([sn])
        idx += 1

    wf_out = wf.run('MultiProc')

    node_names = [i.name for i in wf_out.nodes()]
    result_dict = dict(zip(node_names, wf_out.nodes()))
    saved_chunks = ([
        result_dict['sn_{}'.format(i)].result.outputs.output_filename
        for i in range(0, len(input_img))
    ])

    return saved_chunks
Example #17
def test_mapnode_json(tmpdir):
    """Tests that mapnodes don't generate excess jsons
    """
    tmpdir.chdir()
    wd = os.getcwd()
    from nipype import MapNode, Function, Workflow

    def func1(in1):
        return in1 + 1

    n1 = MapNode(Function(input_names=['in1'],
                          output_names=['out'],
                          function=func1),
                 iterfield=['in1'],
                 name='n1')
    n1.inputs.in1 = [1]
    w1 = Workflow(name='test')
    w1.base_dir = wd
    w1.config['execution']['crashdump_dir'] = wd
    w1.add_nodes([n1])
    w1.run()
    n1.inputs.in1 = [2]
    w1.run()
    # should rerun
    n1.inputs.in1 = [1]
    eg = w1.run()

    node = list(eg.nodes())[0]
    outjson = glob(os.path.join(node.output_dir(), '_0x*.json'))
    assert len(outjson) == 1

    # check that multiple json's don't trigger rerun
    with open(os.path.join(node.output_dir(), 'test.json'), 'wt') as fp:
        fp.write('dummy file')
    w1.config['execution'].update(**{'stop_on_first_rerun': True})

    w1.run()
Example #18
def test_mapnode_json(tmpdir):
    """Tests that mapnodes don't generate excess jsons
    """
    tmpdir.chdir()
    wd = os.getcwd()
    from nipype import MapNode, Function, Workflow

    def func1(in1):
        return in1 + 1

    n1 = MapNode(
        Function(input_names=["in1"], output_names=["out"], function=func1),
        iterfield=["in1"],
        name="n1",
    )
    n1.inputs.in1 = [1]
    w1 = Workflow(name="test")
    w1.base_dir = wd
    w1.config["execution"]["crashdump_dir"] = wd
    w1.add_nodes([n1])
    w1.run()
    n1.inputs.in1 = [2]
    w1.run()
    # should rerun
    n1.inputs.in1 = [1]
    eg = w1.run()

    node = list(eg.nodes())[0]
    outjson = glob(os.path.join(node.output_dir(), "_0x*.json"))
    assert len(outjson) == 1

    # check that multiple json's don't trigger rerun
    with open(os.path.join(node.output_dir(), "test.json"), "wt") as fp:
        fp.write("dummy file")
    w1.config["execution"].update(**{"stop_on_first_rerun": True})

    w1.run()
Example #19
def iterPipeline(resultsDir, workDir, subDir, subid):
	ANAT_DIR = abspath(join(subDir, 'ses-test/anat'))
	ANAT_T1W = abspath(join(ANAT_DIR,  subid + '_ses-test_T1w.nii.gz'))
	ANAT_BET_T1W=abspath(join(resultsDir, subid + '_ses-test_T1w_bet.nii.gz'))

	betNodeInputs={}
	betNodeInputs['in_file']=ANAT_T1W
	betNodeInputs['mask']=True
	skullstrip = createNiPypeNode(BET(),'skullstrip',betNodeInputs)

	smoothNodeInputs = {}
	isosmooth = createNiPypeNode(IsotropicSmooth(),'isosmooth',smoothNodeInputs)

	isosmooth.iterables = ("fwhm", [4, 8, 16])

	# Create the workflow
	wfName = "isosmoothflow"
	wf = Workflow(name=wfName)
	WF_DIR=abspath(join(workDir, wfName))
	wf.base_dir = WF_DIR
	wf.connect(skullstrip, 'out_file', isosmooth, 'in_file')

	# Run it in parallel (one core for each smoothing kernel)
	wf.run('MultiProc', plugin_args={'n_procs': 3})
	#graph showing summary of embedded workflow
	wfGraph='smoothflow_graph.dot'
	wf.write_graph(join(WF_DIR,wfGraph), graph2use='exec', format='png', simple_form=True)
	wfImg = plt.imread(join(WF_DIR,wfGraph+'.png'))
	plt.imshow(wfImg)
	plt.gca().set_axis_off()
	plt.show()

	# First, let's specify the list of input variables
	subject_list = ['sub-01', 'sub-02', 'sub-03', 'sub-04', 'sub-05']
	session_list = ['ses-retest', 'ses-test']
	fwhm_widths = [4, 8]
Example #20
def test_mapnode_json(tmpdir):
    """Tests that mapnodes don't generate excess jsons
    """
    tmpdir.chdir()
    wd = os.getcwd()
    from nipype import MapNode, Function, Workflow

    def func1(in1):
        return in1 + 1
    n1 = MapNode(Function(input_names=['in1'],
                          output_names=['out'],
                          function=func1),
                 iterfield=['in1'],
                 name='n1')
    n1.inputs.in1 = [1]
    w1 = Workflow(name='test')
    w1.base_dir = wd
    w1.config['execution']['crashdump_dir'] = wd
    w1.add_nodes([n1])
    w1.run()
    n1.inputs.in1 = [2]
    w1.run()
    # should rerun
    n1.inputs.in1 = [1]
    eg = w1.run()

    node = list(eg.nodes())[0]
    outjson = glob(os.path.join(node.output_dir(), '_0x*.json'))
    assert len(outjson) == 1

    # check that multiple json's don't trigger rerun
    with open(os.path.join(node.output_dir(), 'test.json'), 'wt') as fp:
        fp.write('dummy file')
    w1.config['execution'].update(**{'stop_on_first_rerun': True})

    w1.run()
Example #21
def group_onesample_openfmri(dataset_dir, model_id=None, task_id=None,
                             l1output_dir=None, out_dir=None, no_reversal=False):

    wk = Workflow(name='one_sample')
    wk.base_dir = os.path.abspath(work_dir)

    info = Node(util.IdentityInterface(fields=['model_id','task_id','dataset_dir']),
                                        name='infosource')
    info.inputs.model_id=model_id
    info.inputs.task_id=task_id
    info.inputs.dataset_dir=dataset_dir
    
    num_copes=contrasts_num(model_id,task_id,dataset_dir)

    dg = Node(DataGrabber(infields=['model_id','task_id','cope_id'], 
                          outfields=['copes', 'varcopes']),name='grabber')
    dg.inputs.template = os.path.join(l1output_dir,'model%03d/task%03d/*/%scopes/mni/%scope%02d.nii.gz')
    dg.inputs.template_args['copes'] = [['model_id','task_id','', '', 'cope_id']]
    dg.inputs.template_args['varcopes'] = [['model_id','task_id','var', 'var', 'cope_id']]
    dg.iterables=('cope_id',num_copes)

    dg.inputs.sort_filelist = True

    wk.connect(info,'model_id',dg,'model_id')
    wk.connect(info,'task_id',dg,'task_id')

    model = Node(L2Model(), name='l2model')

    wk.connect(dg, ('copes', get_len), model, 'num_copes')

    mergecopes = Node(Merge(dimension='t'), name='merge_copes')
    wk.connect(dg, 'copes', mergecopes, 'in_files')

    mergevarcopes = Node(Merge(dimension='t'), name='merge_varcopes')
    wk.connect(dg, 'varcopes', mergevarcopes, 'in_files')

    mask_file = fsl.Info.standard_image('MNI152_T1_2mm_brain_mask.nii.gz')
    flame = Node(FLAMEO(), name='flameo')
    flame.inputs.mask_file =  mask_file
    flame.inputs.run_mode = 'flame1'

    wk.connect(model, 'design_mat', flame, 'design_file')
    wk.connect(model, 'design_con', flame, 't_con_file')
    wk.connect(mergecopes, 'merged_file', flame, 'cope_file')
    wk.connect(mergevarcopes, 'merged_file', flame, 'var_cope_file')
    wk.connect(model, 'design_grp', flame, 'cov_split_file')

    smoothest = Node(SmoothEstimate(), name='smooth_estimate') 
    wk.connect(flame, 'zstats', smoothest, 'zstat_file')
    smoothest.inputs.mask_file = mask_file

  
    cluster = Node(Cluster(), name='cluster')
    wk.connect(smoothest,'dlh', cluster, 'dlh')
    wk.connect(smoothest, 'volume', cluster, 'volume')
    cluster.inputs.connectivity = 26
    cluster.inputs.threshold=2.3
    cluster.inputs.pthreshold = 0.05
    cluster.inputs.out_threshold_file = True
    cluster.inputs.out_index_file = True
    cluster.inputs.out_localmax_txt_file = True

    wk.connect(flame, 'zstats', cluster, 'in_file')
	 
    ztopval = Node(ImageMaths(op_string='-ztop', suffix='_pval'),
                   name='z2pval')
    wk.connect(flame, 'zstats', ztopval,'in_file')
    
    

    sinker = Node(DataSink(), name='sinker')  
    sinker.inputs.base_directory = os.path.abspath(out_dir)
    sinker.inputs.substitutions = [('_cope_id', 'contrast'),
                                   ('_maths__', '_reversed_')]
    
    wk.connect(flame, 'zstats', sinker, 'stats')
    wk.connect(cluster, 'threshold_file', sinker, 'stats.@thr')
    wk.connect(cluster, 'index_file', sinker, 'stats.@index')
    wk.connect(cluster, 'localmax_txt_file', sinker, 'stats.@localmax')
    
    if no_reversal == False:
        zstats_reverse = Node( BinaryMaths()  , name='zstats_reverse')
        zstats_reverse.inputs.operation = 'mul'
        zstats_reverse.inputs.operand_value= -1
        wk.connect(flame, 'zstats', zstats_reverse, 'in_file')

        cluster2=cluster.clone(name='cluster2')
        wk.connect(smoothest,'dlh',cluster2,'dlh')
        wk.connect(smoothest,'volume',cluster2,'volume')
        wk.connect(zstats_reverse,'out_file',cluster2,'in_file')
   
        ztopval2 = ztopval.clone(name='ztopval2')
        wk.connect(zstats_reverse,'out_file',ztopval2,'in_file')

        wk.connect(zstats_reverse,'out_file',sinker,'stats.@neg')
        wk.connect(cluster2,'threshold_file',sinker,'stats.@neg_thr')
        wk.connect(cluster2,'index_file',sinker,'stats.@neg_index')
        wk.connect(cluster2,'localmax_txt_file',sinker,'stats.@neg_localmax')

    return wk
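A hedged sketch of running the returned workflow (`work_dir` is a module-level global the function expects; every path and ID below is a placeholder):

wk = group_onesample_openfmri(dataset_dir='/data/ds000114',
                              model_id=1, task_id=1,
                              l1output_dir='/data/l1output',
                              out_dir='/data/group_stats')
wk.run(plugin='MultiProc', plugin_args={'n_procs': 4})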
Example #22
                    "--cnames",
                    type=str,
                    help="column string dataset in h5 file")
args = parser.parse_args()

try:
    nruns = int(args.nruns)
except:
    raise Exception("number of runs should be an integer")

if not os.path.isdir(args.save_name):
    os.makedirs(args.save_name)

cwd = os.getcwd()
wf = Workflow(name="sing")
wf.base_dir = cwd

Iternode = Node(IdentityInterface(fields=["run_idx"]), name="Iternode")
Iternode.iterables = ("run_idx", np.arange(nruns) + 1)


def write_varbvs(base_dir, data_path, save_name, col_idx, cnames):
    import os
    f_name = "bf-feature_idx-{}.csv".format(col_idx)
    shfile = "\n".join([
        "#!/bin/bash",
        ("R_DEFAULT_PACKAGES= Rscript "
         "{script} {data_path} {save_name} {col_idx} {cnames}")
    ])
    file_name = os.path.join(base_dir, "gtoi_template.r")
    r_file = shfile.format(script=file_name, data_path=data_path,
                           save_name=save_name, col_idx=col_idx,
                           cnames=cnames)
Example #23
datasink = Node(DataSink(base_directory=experiment_dir,
                         container=output_dir),
                name="datasink")
substitutions = [('_subject_id_', 'sub_'),
                 ('_out',''),
                 ('_mr_convertaparc_aseg0/',''),
                 ('_mr_convertaparc_aseg1/',''),
                 ('_mr_convertaparc_aseg2/','')]

datasink.inputs.substitutions = substitutions

###############################
#Specify workflow
###############################

preproc = Workflow(name='preproc')
preproc.base_dir = experiment_dir

preproc.connect([(infosource, selectfiles, [('subject_id','subject_id')]),
                 (selectfiles, smooth, [('t1', 'in_file')]),
                 (smooth, recon_all, [('smoothed_file', 'T1_files')]),
                 (recon_all, mr_convertT1,[('T1', 'in_file')]),
                 (recon_all, mr_convertaseg,[('aseg','in_file')]),
                 (recon_all, mr_convertaparc_aseg,[('aparc_aseg','in_file')]),
                 (recon_all, mr_convertbrainmask,[('brainmask','in_file')]),
                 (recon_all, mr_convertbrain,[('brain','in_file')]),
                 (recon_all, mr_convertwmparc,[('wmparc','in_file')]),
                 (recon_all, mr_convertwm,[('wm','in_file')]),
                 (mr_convertT1, datasink,[('out_file','preproc.@T1')]),
                 (mr_convertaseg, datasink,[('out_file','preproc.@aseg')]),
                 (mr_convertaparc_aseg, datasink,[('out_file','preproc.@aparc_aseg')]),
                 (mr_convertbrainmask, datasink,[('out_file','preproc.@brainmask')]),
Example #24
# Datasink - creates output folder for important outputs
datasink = Node(DataSink(base_directory=experiment_dir,
                         container=output_dir),
                name="datasink")

# Use the following DataSink output substitutions
substitutions = [('_subject_id_', '')]
datasink.inputs.substitutions = substitutions

###
# Specify Normalization Workflow & Connect Nodes

# Initiation of the ANTS normalization workflow
regflow = Workflow(name='regflow')
regflow.base_dir = opj(experiment_dir, working_dir)

# Connect workflow nodes
regflow.connect([(infosource, selectfiles, [('subject_id', 'subject_id')]),
                 (selectfiles, antsreg, [('anat', 'moving_image')]),
                 (antsreg, datasink, [('warped_image',
                                       'antsreg.@warped_image'),
                                      ('inverse_warped_image',
                                       'antsreg.@inverse_warped_image'),
                                      ('composite_transform',
                                       'antsreg.@transform'),
                                      ('inverse_composite_transform',
                                       'antsreg.@inverse_transform')]),
                 ])

###
Example #25
    wf = Workflow("MachineLearning_Baseline_{0}".format(session_id))
    datasink = Node(DataSink(), name="DataSink")
    datasink.inputs.base_directory = os.path.join(results_dir, session_id)
    for hemisphere in ("lh", "rh"):
        for matter in ("gm", "wm"):
            wf.connect(
                logb_wf,
                "output_spec.{0}_{1}surface_file".format(hemisphere, matter),
                datasink,
                "EdgePrediction.@{0}_{1}".format(hemisphere, matter),
            )

    logb_wf.inputs.input_spec.t1_file = t1_file
    logb_wf.inputs.input_spec.orig_t1 = t1_file
    logb_wf.inputs.input_spec.t2_file = t2_file
    logb_wf.inputs.input_spec.posteriors = posterior_files
    logb_wf.inputs.input_spec.hncma_file = hncma_atlas
    logb_wf.inputs.input_spec.abc_file = abc_file
    # logb_wf.inputs.input_spec.acpc_transform = identity_transform_file
    logb_wf.inputs.input_spec.rho = direction_files["rho"]
    logb_wf.inputs.input_spec.theta = direction_files["theta"]
    logb_wf.inputs.input_spec.phi = direction_files["phi"]
    logb_wf.inputs.input_spec.lh_white_surface_file = lh_white_surface_file
    logb_wf.inputs.input_spec.rh_white_surface_file = rh_white_surface_file
    logb_wf.inputs.input_spec.wm_classifier_file = wm_classifier_file
    logb_wf.inputs.input_spec.gm_classifier_file = gm_classifier_file
    wf.base_dir = base_dir
    # wf.run(plugin="SGE", plugin_args={"qsub_args": "-q HJ,all.q,COE,UI"})
    # wf.run(plugin="MultiProc", plugin_args={"n_procs": 24})
    wf.run()
Example #26
def main():
    parser = argparse.ArgumentParser(description="BigBrain "
                                     "nipype incrementation")
    parser.add_argument('bb_dir',
                        type=str,
                        help='The folder containing BigBrain NIfTI images '
                        '(local fs only) or image file')
    parser.add_argument('output_dir',
                        type=str,
                        help='the folder to save incremented images to '
                        '(local fs only)')
    parser.add_argument('iterations', type=int, help='number of iterations')
    parser.add_argument('--cli',
                        action='store_true',
                        help='use CLI application')
    parser.add_argument('--work_dir', type=str, help='working directory')
    parser.add_argument('--delay',
                        type=float,
                        default=0,
                        help='task duration time (in s)')
    parser.add_argument('--benchmark',
                        action='store_true',
                        help='benchmark pipeline')
    parser.add_argument('--plugin',
                        type=str,
                        choices=['SLURM', 'MultiProc'],
                        default='MultiProc',
                        help='Plugin to use')
    parser.add_argument('--plugin_args',
                        type=str,
                        help='Plugin arguments file in dictionary format')
    parser.add_argument('--nnodes', type=int, help='Number of nodes available')

    args = parser.parse_args()
    start = time()
    wf = Workflow('npinc_bb')

    if args.work_dir is not None:
        wf.base_dir = os.path.abspath(args.work_dir)

    output_dir = os.path.abspath(args.output_dir)

    try:
        os.makedirs(output_dir)
    except Exception as e:
        pass

    benchmark_dir = None
    app_uuid = str(uuid.uuid1())

    if args.benchmark:
        benchmark_dir = os.path.abspath(
            os.path.join(args.output_dir, 'benchmarks-{}'.format(app_uuid)))
        try:
            os.makedirs(benchmark_dir)
        except Exception as e:
            pass

    #benchmark_dir = None
    #args.benchmark = False
    bb_dir = os.path.abspath(args.bb_dir)

    if os.path.isdir(bb_dir):
        # get all files in directory
        bb_files = glob.glob(os.path.join(os.path.abspath(args.bb_dir), '*'))
    elif os.path.isfile(bb_dir):
        bb_files = [bb_dir]
    else:
        bb_files = bb_dir.split(',')

    if args.plugin == 'SLURM':
        bb_files = [bb_files]

    assert args.iterations > 0

    count = 0
    for chunk in bb_files:
        ca = None
        if args.plugin == 'MultiProc':
            inc_1 = increment_node('inc_bb{}'.format(count), chunk, args.delay,
                                   benchmark_dir, args.benchmark, args.cli)
        else:
            inc_1, ca_1 = computed_avg_node('ca_bb{}'.format(count),
                                            args.nnodes,
                                            args.work_dir,
                                            chunk=chunk,
                                            delay=args.delay,
                                            benchmark_dir=benchmark_dir,
                                            benchmark=args.benchmark,
                                            cli=args.cli)

        wf.add_nodes([inc_1])

        if args.plugin == 'SLURM':
            wf.connect([(inc_1, ca_1, [('inc_chunk', 'chunks')])])

        inc_2 = None

        for i in range(0, args.iterations - 1):
            node_name = 'inc_bb{0}_{1}'.format(count, i + 1)
            ca_2 = None

            if args.plugin == 'MultiProc':
                inc_2 = increment_node(node_name,
                                       delay=args.delay,
                                       benchmark_dir=benchmark_dir,
                                       benchmark=args.benchmark,
                                       cli=args.cli)

            else:
                inc_2, ca_2 = computed_avg_node('ca_bb{0}_{1}'.format(
                    count, i + 1),
                                                args.nnodes,
                                                args.work_dir,
                                                delay=args.delay,
                                                benchmark_dir=benchmark_dir,
                                                benchmark=args.benchmark,
                                                cli=args.cli)

            wf.connect([(inc_1, inc_2, [('inc_chunk', 'chunk')])])

            if args.plugin == 'SLURM':
                wf.connect([(ca_1, inc_2, [('avg_chunk', 'avg')])])

                if i < args.iterations - 2:
                    wf.connect([(inc_2, ca_2, [('inc_chunk', 'chunks')])])

            inc_1 = inc_2
            ca_1 = ca_2

        s_nname = 'save_res{}'.format(count)
        save_res = None
        if args.plugin == 'MultiProc':
            save_res = save_node(s_nname, None, output_dir, args.iterations,
                                 args.benchmark, benchmark_dir)
        else:
            save_res = cluster_save(s_nname, None, output_dir, args.iterations,
                                    args.benchmark, benchmark_dir, args.nnodes,
                                    args.work_dir)

        if inc_2 is None:
            wf.connect([(inc_1, save_res, [('inc_chunk', 'input_img')])])
        else:
            wf.connect([(inc_2, save_res, [('inc_chunk', 'input_img')])])

        count += 1

    if args.plugin_args is not None:
        wf.run(plugin=args.plugin, plugin_args={'template': args.plugin_args})
    else:
        wf.run(plugin=args.plugin)

    wf.write_graph(graph2use='colored')

    end = time()

    if args.benchmark:
        fname = 'benchmark-{}.txt'.format(app_uuid)
        benchmark_file = os.path.abspath(os.path.join(args.output_dir, fname))
        print(benchmark_file)

        with open(benchmark_file, 'a+') as bench:
            bench.write('{0} {1} {2} {3} {4} {5}\n'.format(
                'driver_program', start, end, socket.gethostname(), 'allfiles',
                get_ident()))

            for b in os.listdir(benchmark_dir):
                with open(os.path.join(benchmark_dir, b), 'r') as f:
                    bench.write(f.read())

        shutil.rmtree(benchmark_dir)
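A hedged sketch of driving `main()` programmatically with the arguments defined above; the script name in `argv[0]` and all paths are placeholders:

import sys

# Increment each BigBrain chunk five times using local multiprocessing.
sys.argv = ['nipype_inc_script.py', '/data/bigbrain_chunks', '/data/output', '5',
            '--plugin', 'MultiProc', '--work_dir', '/tmp/npinc_work']
main()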
Example #27
selectfiles = MapNode(SelectFiles(
    templates,
    base_directory='/home/rj299/scratch60/mdm_analysis/work/',
    sort_filelist=True),
                      name="selectfiles",
                      iterfield=['subject_id'])

datasink = Node(nio.DataSink(
    base_directory=
    '/home/rj299/scratch60/mdm_analysis/output/imaging/Sink_resp_mon_sv/'),
                name="datasink")

l2analysis = Workflow(name='l2spm_mon_sv_glm_heightp05')

l2analysis.base_dir = '/home/rj299/scratch60/mdm_analysis/work/'

l2analysis.connect([
    (infosource, selectfiles, [('contrast_id', 'contrast_id'),
                               ('subject_id', 'subject_id')]),
    (selectfiles, onesamplettestdes, [('cons', 'in_files')]),
    (onesamplettestdes, level2estimate, [('spm_mat_file', 'spm_mat_file')]),
    (level2estimate, level2conestimate, [('spm_mat_file', 'spm_mat_file'),
                                         ('beta_images', 'beta_images'),
                                         ('residual_image', 'residual_image')
                                         ]),
    (level2conestimate, level2thresh, [
        ('spm_mat_file', 'spm_mat_file'),
        ('spmT_images', 'stat_image'),
    ]),
    (level2conestimate, datasink, [('spm_mat_file',
Example #28
info = dict(T1=[['subject_id']])

infosource = Node(IdentityInterface(fields=['subject_id']), name='infosource')
infosource.iterables = ('subject_id', sids)

# Create a datasource node to get the T1 file
datasource = Node(DataGrabber(infields=['subject_id'], outfields=info.keys()),
                  name='datasource')
datasource.inputs.template = '%s/%s'
datasource.inputs.base_directory = os.path.abspath('/home/data/madlab/data/mri/seqtrd/')
datasource.inputs.field_template = dict(T1='%s/anatomy/T1_*.nii.gz')
datasource.inputs.template_args = info
datasource.inputs.sort_filelist = True

reconall_node = Node(ReconAll(), name='reconall_node')
reconall_node.inputs.openmp = 2
reconall_node.inputs.subjects_dir = os.environ['SUBJECTS_DIR']
reconall_node.inputs.terminal_output = 'allatonce'
reconall_node.plugin_args={'bsub_args': ('-q PQ_madlab -n 2'), 'overwrite': True}

wf = Workflow(name='fsrecon')

wf.connect(infosource, 'subject_id', datasource, 'subject_id')
wf.connect(infosource, 'subject_id', reconall_node, 'subject_id')
wf.connect(datasource, 'T1', reconall_node, 'T1_files')

wf.base_dir = os.path.abspath('/scratch/madlab/surfaces/seqtrd')
#wf.config['execution']['job_finished_timeout'] = 65

wf.run(plugin='LSF', plugin_args={'bsub_args': ('-q PQ_madlab')})

Example #29
infosource = Node(util.IdentityInterface(fields=['contrast_id']),
                  name="infosource")
infosource.iterables = [('contrast_id', contrast_list)]

# SelectFiles - to grab the data (alternative to DataGrabber)
templates = {
    'cons': opj('/media/Data/work/datasink/1stLevel/_sub*/',
                '{contrast_id}.nii')
}
selectfiles = Node(SelectFiles(templates,
                               base_directory='/media/Data/work',
                               sort_filelist=True),
                   name="selectfiles")

l2analysis = Workflow(name='spm_l2analysis')
l2analysis.base_dir = opj(data_dir, '/media/Data/work/')

l2analysis.connect([
    (infosource, selectfiles, [
        ('contrast_id', 'contrast_id'),
    ]),
    (selectfiles, onesamplettestdes, [('cons', 'in_files')]),
    (onesamplettestdes, level2estimate, [('spm_mat_file', 'spm_mat_file')]),
    (level2estimate, level2conestimate, [('spm_mat_file', 'spm_mat_file'),
                                         ('beta_images', 'beta_images'),
                                         ('residual_image', 'residual_image')
                                         ]),
    (level2conestimate, level2thresh, [
        ('spm_mat_file', 'spm_mat_file'),
        ('spmT_images', 'stat_image'),
    ]),
Example #30
                         name='split_single_file')
split_single_file.inputs.source_data_path = source_data_path
split_single_file.inputs.rb_trig = run_break_trigger

# Create raw_to_bids node
raw_to_bids = MapNode(Function(input_names=[
    'source_data_run_file', 'bids_root', 'run_id', 'subject_id', 'task_name',
    'event_id'
],
                               output_names=[],
                               function=raw2bids),
                      name='raw_to_bids',
                      iterfield=['source_data_run_file', 'run_id'])
raw_to_bids.inputs.bids_root = bids_root
raw_to_bids.inputs.task_name = task_name
raw_to_bids.inputs.event_id = event_id

# Create a preprocessing workflow
raw2bids = Workflow(name='raw2bids')
raw2bids.base_dir = opj(data_dir, working_dir)

# Connect all components of the preprocessing workflow
raw2bids.connect([
    (infosource, selectfiles, [('subject_id', 'subject_id')]),
    (infosource, raw_to_bids, [('subject_id', 'subject_id')]),
    (selectfiles, split_single_file, [('eeg_raw', 'source_data_file')]),
    (split_single_file, raw_to_bids, [('run_files', 'source_data_run_file')]),
    (split_single_file, raw_to_bids, [('run_ids', 'run_id')]),
])

raw2bids.run()
Example #31
    else:
        from io import StringIO
        data = StringIO(r.content.decode())

    df = pd.read_csv(data)
    max_subjects = df.shape[0]
    if args.num_subjects:
        max_subjects = args.num_subjects
    elif ('CIRCLECI' in os.environ and os.environ['CIRCLECI'] == 'true'):
        max_subjects = 1
    
    meta_wf = Workflow('metaflow')
    count = 0
    for row in df.iterrows():
        wf = create_workflow(row[1].Subject, sink_dir, row[1]['File Path'])
        meta_wf.add_nodes([wf])
        print('Added workflow for: {}'.format(row[1].Subject))
        count = count + 1
        # run this for only one person on CircleCI
        if count >= max_subjects:
            break

    meta_wf.base_dir = work_dir
    meta_wf.config['execution']['remove_unnecessary_files'] = False
    meta_wf.config['execution']['poll_sleep_duration'] = 2
    meta_wf.config['execution']['crashdump_dir'] = work_dir
    if args.plugin_args:
        meta_wf.run(args.plugin, plugin_args=eval(args.plugin_args))
    else:
        meta_wf.run(args.plugin)
Example #32
        ('_fwhm_id_%s_subject_id_%s_task_name_empathy/_apply_norm_anat0' %
         (f, sub), 'sub-%s/anat/' % (sub)) for f in fwhm
        for sub in subject_list
    ]
    subjFolders = [
        ('_fwhm_id_%s_subject_id_%s_task_name_empathy/_apply_norm_bold0' %
         (f, sub), 'sub-%s/bold/task-empathy/' % (sub)) for f in fwhm
        for sub in subject_list
    ]

    substitutions.extend(subjFolders)
    datasink.inputs.substitutions = substitutions

    # Initiation of the ANTs normalization workflow
    antsflow = Workflow(name='antsflow')
    antsflow.base_dir = opj(experiment_dir, working_dir)

    # Connect up the ANTs normalization components
    antsflow.connect([
        (infosource, selectfiles, [('subject_id', 'subject_id'),
                                   ('task_name', 'task_name'),
                                   ('fwhm_id', 'fwhm_id')]),
        (selectfiles, apply_norm_anat, [('anat', 'input_image'),
                                        ('transform', 'transforms')]),
        (selectfiles, apply_norm_bold, [('bold', 'input_image'),
                                        ('transform', 'transforms')]),
        (apply_norm_anat, datasink, [('output_image', 'antsflow.@anat')]),
        (apply_norm_bold, datasink, [('output_image', 'antsflow.@bold')])
    ])

    #while subject_list_done_norm != full_subject_list:
Example #33
    wf = Workflow("MachineLearning_Baseline_{0}".format(session_id))
    datasink = Node(DataSink(), name="DataSink")
    datasink.inputs.base_directory = os.path.join(results_dir, session_id)
    for hemisphere in ("lh", "rh"):
        for matter in ("gm", "wm"):
            wf.connect(
                logb_wf,
                "output_spec.{0}_{1}surface_file".format(hemisphere, matter),
                datasink,
                "EdgePrediction.@{0}_{1}".format(hemisphere, matter),
            )

    logb_wf.inputs.input_spec.t1_file = t1_file
    logb_wf.inputs.input_spec.orig_t1 = t1_file
    logb_wf.inputs.input_spec.t2_file = t2_file
    logb_wf.inputs.input_spec.posteriors = posterior_files
    logb_wf.inputs.input_spec.hncma_file = hncma_atlas
    logb_wf.inputs.input_spec.abc_file = abc_file
    # logb_wf.inputs.input_spec.acpc_transform = identity_transform_file
    logb_wf.inputs.input_spec.rho = direction_files["rho"]
    logb_wf.inputs.input_spec.theta = direction_files["theta"]
    logb_wf.inputs.input_spec.phi = direction_files["phi"]
    logb_wf.inputs.input_spec.lh_white_surface_file = lh_white_surface_file
    logb_wf.inputs.input_spec.rh_white_surface_file = rh_white_surface_file
    logb_wf.inputs.input_spec.wm_classifier_file = wm_classifier_file
    logb_wf.inputs.input_spec.gm_classifier_file = gm_classifier_file
    wf.base_dir = base_dir
    # wf.run(plugin="SGE", plugin_args={"qsub_args": "-q HJ,all.q,COE,UI"})
    # wf.run(plugin="MultiProc", plugin_args={"n_procs": 24})
    wf.run()
Example #34
                  name='datasource')
datasource.inputs.template = '%s/%s'
datasource.inputs.base_directory = os.path.abspath(data_dir)
datasource.inputs.field_template = dict(T1='%s/s1/anatomy/T1_002.nii.gz')
datasource.inputs.template_args = info
datasource.inputs.sort_filelist = True

reconall_node = Node(ReconAll(), name='reconall_node')
reconall_node.inputs.openmp = 2
reconall_node.inputs.args = '-hippocampal-subfields-T1'
reconall_node.inputs.subjects_dir = '/home/data/madlab/surfaces/emuR01'
reconall_node.plugin_args = {
    'sbatch_args': ('-p investor --qos pq_madlab -n 2'),
    'overwrite': True
}

wf = Workflow(name='fsrecon')

wf.connect(infosource, 'subject_id', datasource, 'subject_id')
wf.connect(infosource, 'subject_id', reconall_node, 'subject_id')
wf.connect(datasource, 'T1', reconall_node, 'T1_files')

wf.base_dir = os.path.abspath('/scratch/madlab/emu/')
#wf.config['execution']['job_finished_timeout'] = 65

wf.run(plugin='SLURM',
       plugin_args={
           'sbatch_args': ('-p investor --qos pq_madlab -N 1 -n 1'),
           'overwrite': True
       })
Example #35
def sum(a, b):
    return a + b

wf = Workflow('hello')

adder = Node(Function(input_names=['a', 'b'],
                      output_names=['sum'],
                      function=sum),
             name='a_plus_b')

adder.inputs.a = 1
adder.inputs.b = 3

wf.add_nodes([adder])

wf.base_dir = os.getcwd()

eg = wf.run()

list(eg.nodes())[0].result.outputs

def concat(a, b):
    return [a, b]


concater = Node(Function(input_names=['a', 'b'],
                         output_names=['some_list'],
                         function=concat),
                name='concat_a_b')

wf.connect(adder, 'sum', concater, 'a')
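A minimal continuation of this example, assuming the `adder`, `concater`, and `wf` objects defined above: set the remaining input and rerun the workflow to inspect the concatenated result.

concater.inputs.b = 5  # the first input arrives through the connection from adder
eg = wf.run()
# Look the node up by name instead of relying on iteration order.
concat_node = next(n for n in eg.nodes() if n.name == 'concat_a_b')
print(concat_node.result.outputs.some_list)  # expected [4, 5], since 1 + 3 = 4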
Example #36
def prepare_flair_intNorm(flair_prep_dir, out_dir, wd_dir, crash_dir, subjects_sessions, flair_acq, n_cpu=-1):
    out_dir.mkdir(exist_ok=True, parents=True)
    export_version(out_dir)

    wf = Workflow(name="prepare_flair_intNorm")
    wf.base_dir = wd_dir
    wf.config.remove_unnecessary_outputs = False
    wf.config["execution"]["crashdump_dir"] = crash_dir
    wf.config["monitoring"]["enabled"] = "true"

    subjects, sessions = list(zip(*subjects_sessions))
    infosource = Node(niu.IdentityInterface(fields=["subject", "session", "flair_acq"]), name="infosource")
    infosource.iterables = [("subject", subjects),
                            ("session", sessions),
                            ]
    infosource.synchronize = True

    def subject_info_fnc(flair_prep_dir, subject, session, flair_acq):
        from pathlib import Path

        sub_ses = f"sub-{subject}_ses-{session}"
        flair_files = list(Path(flair_prep_dir).glob(
            f"sub-{subject}/ses-{session}/anat/{sub_ses}_acq-{flair_acq}_*_FLAIR_biascorr.nii.gz"))
        assert len(flair_files) == 1, f"Expected one file, but found {flair_files}"
        flair_file = flair_files[0]

        brain_masks = list(Path(flair_prep_dir).glob(
            f"sub-{subject}/ses-{session}/anat/{sub_ses}_space-flair{flair_acq}_desc-brainmask.nii.gz"))
        assert len(brain_masks) > 0, f"Expected one file, but found {brain_masks}"
        brain_mask = brain_masks[0]

        out_list = [flair_file, brain_mask]
        return [str(o) for o in out_list]  # as Path is not taken everywhere

    grabber = Node(niu.Function(input_names=["flair_prep_dir", "subject", "session", "flair_acq"],
                                output_names=["flair_file", "brain_mask"],
                                function=subject_info_fnc),
                   name="grabber"
                   )
    grabber.inputs.flair_prep_dir = flair_prep_dir
    grabber.inputs.flair_acq = flair_acq

    wf.connect([(infosource, grabber, [("subject", "subject"),
                                       ("session", "session"),
                                       ]
                 )
                ]
               )

    # adapted from https://gist.github.com/lebedov/94f1caf8a792d80cd91e7b99c1a0c1d7
    # Intensity normalization - subtract minimum, then divide by difference of maximum and minimum:
    img_range = Node(interface=fsl.ImageStats(op_string='-k %s -R'), name='img_range')
    wf.connect(grabber, "flair_file", img_range, "in_file")
    wf.connect(grabber, "brain_mask", img_range, "mask_file")

    def func(in_stat):
        min_val, max_val = in_stat
        return '-sub %s -div %s' % (min_val, (max_val - min_val))

    stat_to_op_string = Node(interface=niu.Function(input_names=['in_stat'],
                                                    output_names=['op_string'],
                                                    function=func),
                             name='stat_to_op_string', iterfield=['in_stat'])
    wf.connect(img_range, "out_stat", stat_to_op_string, "in_stat")

    flair_normalized = Node(interface=fsl.ImageMaths(), name='flair_normalized')
    wf.connect(stat_to_op_string, "op_string", flair_normalized, "op_string")
    wf.connect(grabber, "flair_file", flair_normalized, "in_file")

    base_directory = str(out_dir.parent)
    out_path_base = str(out_dir.name)
    ds_flair_biascorr_intNorm = Node(DerivativesDataSink(base_directory=base_directory, out_path_base=out_path_base),
                                     name="ds_flair_biascorr_intNorm")
    ds_flair_biascorr_intNorm.inputs.suffix = "FLAIR_biascorrIntNorm"
    wf.connect(flair_normalized, "out_file", ds_flair_biascorr_intNorm, "in_file")
    wf.connect(grabber, "flair_file", ds_flair_biascorr_intNorm, "source_file")

    wf.run(plugin='MultiProc', plugin_args={'n_procs': n_cpu})
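A hedged invocation sketch (all paths, session labels, and the `flair_acq` value are placeholders; `out_dir` is passed as a `pathlib.Path` because the function calls `.mkdir()`, `.parent`, and `.name` on it):

from pathlib import Path

prepare_flair_intNorm(flair_prep_dir=Path('/data/flair_prep'),
                      out_dir=Path('/data/flair_intNorm'),
                      wd_dir=Path('/tmp/wd'),
                      crash_dir=Path('/tmp/crash'),
                      subjects_sessions=[('01', 'tp1'), ('02', 'tp1')],
                      flair_acq='tra3mm',
                      n_cpu=4)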
Example #37
def group_multregress_openfmri(dataset_dir, model_id=None, task_id=None, l1output_dir=None, out_dir=None, 
                               no_reversal=False, plugin=None, plugin_args=None, flamemodel='flame1',
                               nonparametric=False, use_spm=False):

    meta_workflow = Workflow(name='mult_regress')
    meta_workflow.base_dir = work_dir
    for task in task_id:
        task_name = get_taskname(dataset_dir, task)
        cope_ids = l1_contrasts_num(model_id, task_name, dataset_dir)
        regressors_needed, contrasts, groups, subj_list = get_sub_vars(dataset_dir, task_name, model_id)
        for idx, contrast in enumerate(contrasts):
            wk = Workflow(name='model_%03d_task_%03d_contrast_%s' % (model_id, task, contrast[0][0]))

            info = Node(util.IdentityInterface(fields=['model_id', 'task_id', 'dataset_dir', 'subj_list']),
                        name='infosource')
            info.inputs.model_id = model_id
            info.inputs.task_id = task
            info.inputs.dataset_dir = dataset_dir
            
            dg = Node(DataGrabber(infields=['model_id', 'task_id', 'cope_id'],
                                  outfields=['copes', 'varcopes']), name='grabber')
            dg.inputs.template = os.path.join(l1output_dir,
                                              'model%03d/task%03d/%s/%scopes/%smni/%scope%02d.nii%s')
            if use_spm:
                dg.inputs.template_args['copes'] = [['model_id', 'task_id', subj_list, '', 'spm/',
                                                     '', 'cope_id', '']]
                dg.inputs.template_args['varcopes'] = [['model_id', 'task_id', subj_list, 'var', 'spm/',
                                                        'var', 'cope_id', '.gz']]
            else:
                dg.inputs.template_args['copes'] = [['model_id', 'task_id', subj_list, '', '', '', 
                                                     'cope_id', '.gz']]
                dg.inputs.template_args['varcopes'] = [['model_id', 'task_id', subj_list, 'var', '',
                                                        'var', 'cope_id', '.gz']]
            dg.iterables = ('cope_id', cope_ids)
            dg.inputs.sort_filelist = False

            wk.connect(info, 'model_id', dg, 'model_id')
            wk.connect(info, 'task_id', dg, 'task_id')

            model = Node(MultipleRegressDesign(), name='l2model')
            model.inputs.groups = groups
            model.inputs.contrasts = contrasts[idx]
            model.inputs.regressors = regressors_needed[idx]
            
            mergecopes = Node(Merge(dimension='t'), name='merge_copes')
            wk.connect(dg, 'copes', mergecopes, 'in_files')
            
            if flamemodel != 'ols':
                mergevarcopes = Node(Merge(dimension='t'), name='merge_varcopes')
                wk.connect(dg, 'varcopes', mergevarcopes, 'in_files')
            
            mask_file = fsl.Info.standard_image('MNI152_T1_2mm_brain_mask.nii.gz')
            flame = Node(FLAMEO(), name='flameo')
            flame.inputs.mask_file = mask_file
            flame.inputs.run_mode = flamemodel
            #flame.inputs.infer_outliers = True

            wk.connect(model, 'design_mat', flame, 'design_file')
            wk.connect(model, 'design_con', flame, 't_con_file')
            wk.connect(mergecopes, 'merged_file', flame, 'cope_file')
            if flamemodel != 'ols':
                wk.connect(mergevarcopes, 'merged_file', flame, 'var_cope_file')
            wk.connect(model, 'design_grp', flame, 'cov_split_file')
            
            if nonparametric:
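                # nonparametric inference: run_palm (presumably wrapping FSL's PALM permutation tool)
                # is run alongside FLAME; z = 3.09 corresponds roughly to a p < 0.001 cluster-forming threshold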
                palm = Node(Function(input_names=['cope_file', 'design_file', 'contrast_file', 
                                                  'group_file', 'mask_file', 'cluster_threshold'],
                                     output_names=['palm_outputs'],
                                     function=run_palm),
                            name='palm')
                palm.inputs.cluster_threshold = 3.09
                palm.inputs.mask_file = mask_file
                palm.plugin_args = {'sbatch_args': '-p om_all_nodes -N1 -c2 --mem=10G', 'overwrite': True}
                wk.connect(model, 'design_mat', palm, 'design_file')
                wk.connect(model, 'design_con', palm, 'contrast_file')
                wk.connect(mergecopes, 'merged_file', palm, 'cope_file')
                wk.connect(model, 'design_grp', palm, 'group_file')
                
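            # estimate the spatial smoothness (DLH, search volume) of the z-stat map for GRF-based cluster correction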
            smoothest = Node(SmoothEstimate(), name='smooth_estimate')
            wk.connect(flame, 'zstats', smoothest, 'zstat_file')
            smoothest.inputs.mask_file = mask_file
        
            cluster = Node(Cluster(), name='cluster')
            wk.connect(smoothest, 'dlh', cluster, 'dlh')
            wk.connect(smoothest, 'volume', cluster, 'volume')
            cluster.inputs.connectivity = 26
            cluster.inputs.threshold = 2.3
            cluster.inputs.pthreshold = 0.05
            cluster.inputs.out_threshold_file = True
            cluster.inputs.out_index_file = True
            cluster.inputs.out_localmax_txt_file = True
            
            wk.connect(flame, 'zstats', cluster, 'in_file')
    
            ztopval = Node(ImageMaths(op_string='-ztop', suffix='_pval'),
                           name='z2pval')
            wk.connect(flame, 'zstats', ztopval, 'in_file')
            
            sinker = Node(DataSink(), name='sinker')
            sinker.inputs.base_directory = os.path.join(out_dir, 'task%03d' % task, contrast[0][0])
            sinker.inputs.substitutions = [('_cope_id', 'contrast'),
                                           ('_maths_', '_reversed_')]
            
            wk.connect(flame, 'zstats', sinker, 'stats')
            wk.connect(cluster, 'threshold_file', sinker, 'stats.@thr')
            wk.connect(cluster, 'index_file', sinker, 'stats.@index')
            wk.connect(cluster, 'localmax_txt_file', sinker, 'stats.@localmax')
            if nonparametric:
                wk.connect(palm, 'palm_outputs', sinker, 'stats.palm')

            if not no_reversal:
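                # multiply the z-stats by -1 so the negative direction of the contrast can be clustered and reported as well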
                zstats_reverse = Node(BinaryMaths(), name='zstats_reverse')
                zstats_reverse.inputs.operation = 'mul'
                zstats_reverse.inputs.operand_value = -1
                wk.connect(flame, 'zstats', zstats_reverse, 'in_file')

                cluster2 = cluster.clone(name='cluster2')
                wk.connect(smoothest, 'dlh', cluster2, 'dlh')
                wk.connect(smoothest, 'volume', cluster2, 'volume')
                wk.connect(zstats_reverse, 'out_file', cluster2, 'in_file')

                ztopval2 = ztopval.clone(name='ztopval2')
                wk.connect(zstats_reverse, 'out_file', ztopval2, 'in_file')

                wk.connect(zstats_reverse, 'out_file', sinker, 'stats.@neg')
                wk.connect(cluster2, 'threshold_file', sinker, 'stats.@neg_thr')
                wk.connect(cluster2, 'index_file', sinker, 'stats.@neg_index')
                wk.connect(cluster2, 'localmax_txt_file', sinker, 'stats.@neg_localmax')
            meta_workflow.add_nodes([wk])
    return meta_workflow
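
# A hypothetical invocation of the builder above (paths, model and task IDs are
# placeholders, and work_dir must exist at module level before calling):
#
#     meta_wf = group_multregress_openfmri('/data/ds000114', model_id=1, task_id=[1],
#                                          l1output_dir='/data/l1output', out_dir='/data/group')
#     meta_wf.run(plugin='MultiProc', plugin_args={'n_procs': 4})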
Example #38
def finish_the_job(fmriprep_dir, subjects, pipeline, work_dir=None):
    """Run common preprocessing steps after fMRIprep.

    Parameters
    ==========
    fmriprep_dir : str
        the root directory of the fMRIprep data
    subjects : list
        the subjects to preprocess
    pipeline : dict
        the preprocessing pipeline (steps are applied in order); possible keys are:
            "spatial_smoothing": numeric (FWHM of the Gaussian kernel in millimeters)
            "temporal_filtering": list (highpass and lowpass cutoff values in seconds)
            "timecourse_normalization": str (method; one of "Zscore" or "PSC")
    work_dir : str, optional
        the working directory (default=None)

    Examples
    ========
    >>> from finish_the_job import finish_the_job
    >>> finish_the_job(fmriprep_dir="/path/to/fmriprep_dir/",
    ...                subjects=[1, 2, 3],
    ...                pipeline={"spatial_smoothing": 5,
    ...                          "temporal_filtering": [100, None],
    ...                          "timecourse_normalization": "Zscore"})


    """

    if not isinstance(subjects, (list, tuple)):
        subjects = (subjects,)

    ftj = Workflow(name="finish_the_job")
    if work_dir is not None:
        ftj.base_dir = work_dir  # set working/output directory

    # Get boldfile template
    boldfile_template = Node(utility.Function(
        input_names=["fmriprep_dir", "subject"],
        output_names=["template"],
        function=get_boldfile_template),
                             name='locate_bold_files')
    boldfile_template.inputs.fmriprep_dir = fmriprep_dir
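    # iterables expands the downstream graph into one branch per subject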
    boldfile_template.iterables = ("subject", subjects)

    # Get inputs
    dg = Node(io.DataGrabber(), name="get_data")
    dg.inputs.sort_filelist = True
    ftj.connect(boldfile_template, "template", dg, "template")

    # Preprocess files
    preprocessing = create_preprocessing_workflow(pipeline=pipeline)
    ftj.connect(dg, "outfiles", preprocessing, "inputspec.in_files")

    # Get output filenames
    filenames = MapNode(utility.Function(
        input_names=["bold_filename", "suffix"],
        output_names=["output_filename"],
        function=get_output_filename),
                        iterfield=["bold_filename"],
                        name='create_output_filenames')
    ftj.connect(preprocessing, "outputspec.suffix", filenames, "suffix")
    ftj.connect(dg, "outfiles", filenames, "bold_filename")

    # Save preprocessed files
    ef = MapNode(io.ExportFile(),
                 iterfield=["in_file", "out_file"],
                 name="save_data")
    ftj.connect(preprocessing, "outputspec.preprocessed_files", ef, "in_file")
    ftj.connect(filenames, "output_filename", ef, "out_file")

    # Run workflow
    if work_dir:
        ftj.write_graph(graph2use="colored", dotfilename="graph_colored.dot")
    ftj.run()
Example #39
# Specify the first level node
level1design = Node(interface=spm.Level1Design(), name="level1design")
level1design.inputs.timing_units = modelspec.inputs.output_units
level1design.inputs.interscan_interval = modelspec.inputs.time_repetition
level1design.inputs.bases = {'hrf': {'derivs': [0, 0]}}
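# canonical HRF only, with no time or dispersion derivatives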

level1estimate = Node(interface=spm.EstimateModel(), name="level1estimate")
level1estimate.inputs.estimation_method = {'Classical': 1}

# Setup the contrast estimation process
contrastestimate = Node(interface=spm.EstimateContrast(),
                        name="contrastestimate")
contrastestimate.overwrite = True

l1pipeline = Workflow(name='level1')
l1pipeline.base_dir = output_dir

ds.inputs.base_directory = op.abspath(output_dir)
ds.substitutions = [('_subj_id', ''), ('_task', ''), ('_t1_', ''),
                    ('_kernel_8', '')]

l1pipeline.connect([
    (info_lvl1, sf_func, [('subj_id', 'subj_id'), ('task', 'task'),
                          ('timept', 'timept'), ('kernel', 'kernel')]),
    (sf_func, modelspec, [('func', 'functional_runs'),
                          ('rp', 'realignment_parameters')]),
    (modelspec, level1design, [('session_info', 'session_info')]),
    (info_lvl1, contrastestimate, [('contrasts', 'contrasts')]),
    (level1design, level1estimate, [('spm_mat_file', 'spm_mat_file')]),
    (level1estimate, contrastestimate, [('spm_mat_file', 'spm_mat_file'),
                                        ('beta_images', 'beta_images'),
                                        ('residual_image', 'residual_image')]),
])
Example #40
    def run(self):

        wf = Workflow('bapp')
        wf.base_dir = os.getcwd()

        # group analysis can be executed if participant analysis is skipped
        p_analysis = None

        # Participant analysis
        if self.do_participant_analysis:

            participants = Node(Function(input_names=['nip'],
                                         output_names=['out'],
                                         function=get_participants),
                                name='get_participants')
            participants.inputs.nip = self

            p_analysis = MapNode(Function(input_names=[
                'nip', 'analysis_level', 'participant_label', 'working_dir'
            ],
                                          output_names=['result'],
                                          function=run_analysis),
                                 iterfield=['participant_label'],
                                 name='run_participant_analysis')

            wf.add_nodes([participants])
            wf.connect(participants, 'out', p_analysis, 'participant_label')

            p_analysis.inputs.analysis_level = 'participant'
            p_analysis.inputs.nip = self
            p_analysis.inputs.working_dir = os.getcwd()

        # Group analysis
        if self.do_group_analysis:
            groups = Node(Function(input_names=[
                'nip', 'analysis_level', 'working_dir', 'dummy_token'
            ],
                                   output_names=['g_result'],
                                   function=run_analysis),
                          name='run_group_analysis')

            groups.inputs.analysis_level = 'group'
            groups.inputs.nip = self
            groups.inputs.working_dir = os.getcwd()

            if p_analysis is not None:
                wf.connect(p_analysis, 'result', groups, 'dummy_token')
            else:
                wf.add_nodes([groups])

        eg = wf.run()

        # Convert to dictionary to more easily extract results
        node_names = [i.name for i in eg.nodes()]
        result_dict = dict(zip(node_names, eg.nodes()))

        if self.do_participant_analysis:
            participant_results = (result_dict['run_participant_analysis']
                                   .result.outputs.get('result'))
            for res in participant_results:
                self.pretty_print(res)

        if self.do_group_analysis:
            self.pretty_print(
                result_dict['run_group_analysis'].result.outputs.g_result)
Example #41
import os
import numpy as np
from nipype import Function
from nipype import Node
from nipype import Workflow
from nipype import IdentityInterface

ds="/storage/gablab001/data/genus/GIT/genus/fs_cog/pred_diag/data_sets"
data_sets = [os.path.join(ds, x) for x in os.listdir(ds) if ".csv" in x]
response_var = os.path.join(ds, "response.txt")

wf = Workflow(name="classify_disease")
wf.base_dir = "/om/scratch/Sat/ysa"

Iternode = Node(IdentityInterface(fields=['data', 'classifier']), name="Iternode")
Iternode.iterables = [
    ('data', data_sets),
    ('classifier', ['et', 'lg'])
]
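# the two iterables expand to their cross product: every data set is run with every classifier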

def run(data, classifier, response):
    import numpy as np
    import pandas as pd
    from custom import Mods
    from custom import utils
    
    y = np.genfromtxt(response)
    X = pd.read_csv(data)
    data_mod = data.split('/')[-1].replace('.csv', '')    

    if classifier == 'et':