def test_compute_avg_wf():
    from nipype import Workflow
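    # NOTE: `ca`, `chunks`, `nib`, `np` and `op` are assumed to be module-level
    # imports/fixtures defined elsewhere in the original test file.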

    nnodes = 1
    work_dir = 'test_ca_wf_work'
    chunk = chunks
    delay = 0
    benchmark_dir = None
    benchmark = False
    cli = True

    wf = Workflow('test_ca_wf')
    wf.base_dir = work_dir

    inc_1, ca_1 = ca.computed_avg_node('ca_bb',
                                       nnodes, work_dir,
                                       chunk=chunk,
                                       delay=delay,
                                       benchmark_dir=benchmark_dir,
                                       benchmark=benchmark,
                                       cli=cli)
        
    wf.add_nodes([inc_1])

    wf.connect([(inc_1, ca_1, [('inc_chunk', 'chunks')])])
    nodename = 'inc_2_test'
    inc_2, ca_2 = ca.computed_avg_node(nodename, nnodes, work_dir, delay=delay,
                                 benchmark_dir=benchmark_dir,
                                 benchmark=benchmark, cli=cli)

    wf.connect([(ca_1, inc_2, [('avg_chunk', 'avg')])])
    wf.connect([(inc_1, inc_2, [('inc_chunk', 'chunk')])])
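    # Submit the workflow to SLURM, using the given file as the sbatch job template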
    wf_out = wf.run('SLURM',
                    plugin_args={
                      'template': 'benchmark_scripts/nipype_kmeans_template.sh'
                    })

    node_names = [i.name for i in wf_out.nodes()]
    result_dict = dict(zip(node_names, wf_out.nodes()))
    saved_chunks = (result_dict['ca1_{0}'.format(nodename)]
                                 .result
                                 .outputs
                                 .inc_chunk)

    results = [i for c in saved_chunks for i in c]
    
    im_1 = nib.load(chunks[0]).get_data()
    im_3 = nib.load(chunks[1]).get_data()

    avg = ((im_1 + 1) + (im_3 + 1)) / 2

    im_1 = im_1 + 1 + avg + 1
    im_3 = im_3 + 1 + avg + 1

    for i in results:
        assert op.isfile(i)
        if '1' in i:
            assert np.array_equal(nib.load(i).get_data(), im_1)
        else:
            assert np.array_equal(nib.load(i).get_data(), im_3)
Example #2
def test_serial_input():
    cwd = os.getcwd()
    wd = mkdtemp()
    os.chdir(wd)
    from nipype import MapNode, Function, Workflow
    def func1(in1):
        return in1
    n1 = MapNode(Function(input_names=['in1'],
                          output_names=['out'],
                          function=func1),
                 iterfield=['in1'],
                 name='n1')
    n1.inputs.in1 = [1,2,3]


    w1 = Workflow(name='test')
    w1.base_dir = wd
    w1.add_nodes([n1])
    # set local check
    w1.config['execution'] = {'stop_on_first_crash': 'true',
                              'local_hash_check': 'true',
                              'crashdump_dir': wd}

    # test output of num_subnodes method when serial is default (False)
    yield assert_equal, n1.num_subnodes(), len(n1.inputs.in1)

    # test running the workflow on default conditions
    error_raised = False
    try:
        w1.run(plugin='MultiProc')
    except Exception as e:
        pe.logger.info('Exception: %s' % str(e))
        error_raised = True
    yield assert_false, error_raised
Example #3
    def register_T1_to_simnibs(self):

        ### run flirt registration if it has not been run before
        dest_img = os.path.join(self.mesh_dir, 'm2m_' + self.subject,
                                'T1fs_conform.nii.gz')
        if not os.path.exists(
                os.path.join(self.wf_base_dir, 'T1_to_simnibs_registration')):

            flirt = Node(FLIRT(), name='flirt')
            flirt.inputs.in_file = os.path.join(self.mesh_dir,
                                                'm2m_' + self.subject,
                                                'T1fs.nii.gz')
            flirt.inputs.reference = dest_img
            flirt.inputs.out_file = 'T1_in_Simnibs.nii.gz'
            flirt.inputs.out_matrix_file = 'T12Simnibs.mat'
            flirt.inputs.searchr_x = [-180, 180]
            flirt.inputs.searchr_y = [-180, 180]
            flirt.inputs.searchr_z = [-180, 180]

            wf = Workflow(name='T1_to_simnibs_registration',
                          base_dir=self.wf_base_dir)
            wf.add_nodes([flirt])
            wf.run()

        ## path to registration file
        t12simnibs_reg = os.path.join(self.wf_base_dir,
                                      'T1_to_simnibs_registration', 'flirt',
                                      'T12Simnibs.mat')

        return t12simnibs_reg
Example #4
def increment_wf(chunk, delay, benchmark, benchmark_dir, cli, wf_name, avg,
                 work_dir):
    from nipype import Workflow
    from nipype_inc import increment_node

    wf = Workflow(wf_name)
    wf.base_dir = work_dir
    idx = 0
    node_names = []

    if any(isinstance(i, list) for i in chunk):
        chunk = [i for c in chunk for i in c]

    print('chunks', chunk)
    for fn in chunk:
        inc1_nname = 'inc_wf_{}'.format(idx)
        inc_1 = increment_node(inc1_nname, fn, delay, benchmark_dir, benchmark,
                               cli, avg)
        wf.add_nodes([inc_1])

        node_names.append(inc1_nname)
        idx += 1

    wf_out = wf.run('MultiProc')
    node_names = [i.name for i in wf_out.nodes()]
    result_dict = dict(zip(node_names, wf_out.nodes()))
    inc_chunks = ([
        result_dict['inc_wf_{}'.format(i)].result.outputs.inc_chunk
        for i in range(0, len(chunk))
    ])
    return inc_chunks
Example #5
def init_base_wf(opts: ArgumentParser, layout: BIDSLayout, run_uuid: str,
                 subject_list: list, work_dir: str, output_dir: str):
    workflow = Workflow(name='atlasTransform_wf')
    workflow.base_dir = opts.work_dir

    reportlets_dir = os.path.join(opts.work_dir, 'reportlets')
    for subject_id in subject_list:
        single_subject_wf = init_single_subject_wf(
            opts=opts,
            layout=layout,
            run_uuid=run_uuid,
            work_dir=str(work_dir),
            output_dir=str(output_dir),
            name="single_subject_" + subject_id + "_wf",
            subject_id=subject_id,
            reportlets_dir=reportlets_dir,
        )

        single_subject_wf.config['execution']['crashdump_dir'] = (os.path.join(
            output_dir, "atlasTransform", "sub-" + subject_id, 'log',
            run_uuid))
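        # Propagate the workflow config (including crashdump_dir) to every node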
        for node in single_subject_wf._get_all_nodes():
            node.config = deepcopy(single_subject_wf.config)

        workflow.add_nodes([single_subject_wf])

    return workflow
Example #6
def test_serial_input(tmpdir):
    tmpdir.chdir()
    wd = os.getcwd()
    from nipype import MapNode, Function, Workflow

    def func1(in1):
        return in1
    n1 = MapNode(Function(input_names=['in1'],
                          output_names=['out'],
                          function=func1),
                 iterfield=['in1'],
                 name='n1')
    n1.inputs.in1 = [1, 2, 3]

    w1 = Workflow(name='test')
    w1.base_dir = wd
    w1.add_nodes([n1])
    # set local check
    w1.config['execution'] = {'stop_on_first_crash': 'true',
                              'local_hash_check': 'true',
                              'crashdump_dir': wd,
                              'poll_sleep_duration': 2}

    # test output of num_subnodes method when serial is default (False)
    assert n1.num_subnodes() == len(n1.inputs.in1)

    # test running the workflow on default conditions
    w1.run(plugin='MultiProc')

    # test output of num_subnodes method when serial is True
    n1._serial = True
    assert n1.num_subnodes() == 1

    # test running the workflow on serial conditions
    w1.run(plugin='MultiProc')
Example #7
def test_serial_input():
    cwd = os.getcwd()
    wd = mkdtemp()
    os.chdir(wd)
    from nipype import MapNode, Function, Workflow

    def func1(in1):
        return in1

    n1 = MapNode(Function(input_names=['in1'],
                          output_names=['out'],
                          function=func1),
                 iterfield=['in1'],
                 name='n1')
    n1.inputs.in1 = [1, 2, 3]

    w1 = Workflow(name='test')
    w1.base_dir = wd
    w1.add_nodes([n1])
    # set local check
    w1.config['execution'] = {
        'stop_on_first_crash': 'true',
        'local_hash_check': 'true',
        'crashdump_dir': wd,
        'poll_sleep_duration': 2
    }

    # test output of num_subnodes method when serial is default (False)
    yield assert_equal, n1.num_subnodes(), len(n1.inputs.in1)

    # test running the workflow on default conditions
    error_raised = False
    try:
        w1.run(plugin='MultiProc')
    except Exception as e:
        from nipype.pipeline.engine.base import logger
        logger.info('Exception: %s' % str(e))
        error_raised = True
    yield assert_false, error_raised

    # test output of num_subnodes method when serial is True
    n1._serial = True
    yield assert_equal, n1.num_subnodes(), 1

    # test running the workflow on serial conditions
    error_raised = False
    try:
        w1.run(plugin='MultiProc')
    except Exception as e:
        from nipype.pipeline.engine.base import logger
        logger.info('Exception: %s' % str(e))
        error_raised = True
    yield assert_false, error_raised

    os.chdir(cwd)
    rmtree(wd)
Example #8
def test_serial_input(tmpdir):
    wd = str(tmpdir)
    os.chdir(wd)
    from nipype import MapNode, Function, Workflow

    def func1(in1):
        return in1
    n1 = MapNode(Function(input_names=['in1'],
                          output_names=['out'],
                          function=func1),
                 iterfield=['in1'],
                 name='n1')
    n1.inputs.in1 = [1, 2, 3]

    w1 = Workflow(name='test')
    w1.base_dir = wd
    w1.add_nodes([n1])
    # set local check
    w1.config['execution'] = {'stop_on_first_crash': 'true',
                              'local_hash_check': 'true',
                              'crashdump_dir': wd,
                              'poll_sleep_duration': 2}

    # test output of num_subnodes method when serial is default (False)
    assert n1.num_subnodes() == len(n1.inputs.in1)

    # test running the workflow on default conditions
    error_raised = False
    try:
        w1.run(plugin='MultiProc')
    except Exception as e:
        from nipype.pipeline.engine.base import logger
        logger.info('Exception: %s' % str(e))
        error_raised = True
    assert not error_raised

    # test output of num_subnodes method when serial is True
    n1._serial = True
    assert n1.num_subnodes() == 1

    # test running the workflow on serial conditions
    error_raised = False
    try:
        w1.run(plugin='MultiProc')
    except Exception as e:
        from nipype.pipeline.engine.base import logger
        logger.info('Exception: %s' % str(e))
        error_raised = True

    assert not error_raised
Example #9
def test_mapnode_json():
    """Tests that mapnodes don't generate excess jsons
    """
    cwd = os.getcwd()
    wd = mkdtemp()
    os.chdir(wd)
    from nipype import MapNode, Function, Workflow

    def func1(in1):
        return in1 + 1

    n1 = MapNode(Function(input_names=['in1'],
                          output_names=['out'],
                          function=func1),
                 iterfield=['in1'],
                 name='n1')
    n1.inputs.in1 = [1]
    w1 = Workflow(name='test')
    w1.base_dir = wd
    w1.config['execution']['crashdump_dir'] = wd
    w1.add_nodes([n1])
    w1.run()
    n1.inputs.in1 = [2]
    w1.run()
    # should rerun
    n1.inputs.in1 = [1]
    eg = w1.run()

    node = eg.nodes()[0]
    outjson = glob(os.path.join(node.output_dir(), '_0x*.json'))
    yield assert_equal, len(outjson), 1

    # check that multiple json's don't trigger rerun
    with open(os.path.join(node.output_dir(), 'test.json'), 'wt') as fp:
        fp.write('dummy file')
    w1.config['execution'].update(**{'stop_on_first_rerun': True})
    error_raised = False
    try:
        w1.run()
    except:
        error_raised = True
    yield assert_false, error_raised
    os.chdir(cwd)
    rmtree(wd)
Example #10
def save_classified_wf(partition,
                       assignments,
                       work_dir,
                       output_dir,
                       iteration,
                       benchmark_dir=None):

    from nipype import Workflow, Node, Function
    import nipype_kmeans as nk
    from time import time
    from benchmark import write_bench
    from socket import gethostname
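    # get_ident lives in `threading` on Python 3 and in `thread` on Python 2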
    try:
        from threading import get_ident
    except Exception as e:
        from thread import get_ident

    start = time()

    res_wf = Workflow('km_classify')
    res_wf.base_dir = work_dir
    c_idx = 0
    for chunk in partition:
        cc = Node(Function(input_names=['img', 'assignments', 'out_dir'],
                           output_names=['out_file'],
                           function=nk.classify_chunks),
                  name='{0}cc_{1}'.format(iteration, c_idx))

        cc.inputs.img = chunk
        cc.inputs.assignments = assignments
        cc.inputs.out_dir = output_dir
        res_wf.add_nodes([cc])

        c_idx += 1

    res_wf.run(plugin='MultiProc')

    end = time()
    if benchmark_dir is not None:
        write_bench('save_classified', start, end, gethostname(), 'partition',
                    get_ident(), benchmark_dir)

    return ('Success', partition)
Example #11
def test_mapnode_json():
    """Tests that mapnodes don't generate excess jsons
    """
    cwd = os.getcwd()
    wd = mkdtemp()
    os.chdir(wd)
    from nipype import MapNode, Function, Workflow

    def func1(in1):
        return in1 + 1
    n1 = MapNode(Function(input_names=['in1'],
                          output_names=['out'],
                          function=func1),
                 iterfield=['in1'],
                 name='n1')
    n1.inputs.in1 = [1]
    w1 = Workflow(name='test')
    w1.base_dir = wd
    w1.config['execution']['crashdump_dir'] = wd
    w1.add_nodes([n1])
    w1.run()
    n1.inputs.in1 = [2]
    w1.run()
    # should rerun
    n1.inputs.in1 = [1]
    eg = w1.run()

    node = eg.nodes()[0]
    outjson = glob(os.path.join(node.output_dir(), '_0x*.json'))
    yield assert_equal, len(outjson), 1

    # check that multiple json's don't trigger rerun
    with open(os.path.join(node.output_dir(), 'test.json'), 'wt') as fp:
        fp.write('dummy file')
    w1.config['execution'].update(**{'stop_on_first_rerun': True})
    error_raised = False
    try:
        w1.run()
    except:
        error_raised = True
    yield assert_false, error_raised
    os.chdir(cwd)
    rmtree(wd)
Example #12
def prepare_t1w(bids_dir,
                smriprep_dir,
                out_dir,
                wd_dir,
                crash_dir,
                subjects_sessions,
                n_cpu=1,
                omp_nthreads=1,
                run_wf=True,
                graph=False,
                smriprep06=False):
    _check_versions()
    export_version(out_dir)

    out_dir.mkdir(exist_ok=True, parents=True)

    wf = Workflow(name="meta_prepare_t1")
    wf.base_dir = wd_dir
    wf.config.remove_unnecessary_outputs = False
    wf.config["execution"]["crashdump_dir"] = crash_dir
    wf.config["monitoring"]["enabled"] = "true"

    for subject, session in subjects_sessions:
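        # Build the anatomical preprocessing sub-workflow for this subject/session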
        name = f"anat_preproc_{subject}_{session}"
        single_ses_wf = init_single_ses_anat_preproc_wf(
            subject=subject,
            session=session,
            bids_dir=bids_dir,
            smriprep_dir=smriprep_dir,
            out_dir=out_dir,
            name=name,
            omp_nthreads=omp_nthreads,
            smriprep06=smriprep06)
        wf.add_nodes([single_ses_wf])
    if graph:
        wf.write_graph("workflow_graph.png", graph2use="exec")
        wf.write_graph("workflow_graph_c.png", graph2use="colored")
    if run_wf:
        wf.run(plugin='MultiProc', plugin_args={'n_procs': n_cpu})
Example #13
def run_pipeline(p_name, tareas, results):
    wf = Workflow(p_name)
    wf_nodes = []
    import os
    for i in range(len(tareas)):
        modulo = to_call_by_terminal
        nodo = Node(Function(
            input_names=["ruta", "path_in", "path_out", "parameters_list"],
            output_names=["path_out"],
            function=modulo),
                    name=tareas[i][0])
        wf_nodes.append(nodo)
        wf.add_nodes([nodo])
        if i == 0:
            nodo.inputs.ruta = tareas[i][1]
            nodo.inputs.path_in = tareas[i][2][0]

            if not os.path.exists(results):
                os.mkdir(results)
            nodo.inputs.path_out = results
            try:
                nodo.inputs.parameters_list = tareas[i][2][2:]
            except:
                print("no parametros extra")
        else:
            nodo.inputs.ruta = tareas[i][1]
            nodo.inputs.path_out = results
            try:
                nodo.inputs.parameters_list = tareas[i][2][2:]
            except:
                print("no parametros extra")
            wf.connect(wf_nodes[i - 1], 'path_out', nodo, 'path_in')

    try:
        eg = wf.run()
        return list(eg.nodes())[-1].result.outputs.path_out
    except Exception as e:
        return "error"
Example #14
def test_serial_input(tmpdir):
    tmpdir.chdir()
    wd = os.getcwd()
    from nipype import MapNode, Function, Workflow

    def func1(in1):
        return in1

    n1 = MapNode(
        Function(input_names=["in1"], output_names=["out"], function=func1),
        iterfield=["in1"],
        name="n1",
    )
    n1.inputs.in1 = [1, 2, 3]

    w1 = Workflow(name="test")
    w1.base_dir = wd
    w1.add_nodes([n1])
    # set local check
    w1.config["execution"] = {
        "stop_on_first_crash": "true",
        "local_hash_check": "true",
        "crashdump_dir": wd,
        "poll_sleep_duration": 2,
    }

    # test output of num_subnodes method when serial is default (False)
    assert n1.num_subnodes() == len(n1.inputs.in1)

    # test running the workflow on default conditions
    w1.run(plugin="MultiProc")

    # test output of num_subnodes method when serial is True
    n1._serial = True
    assert n1.num_subnodes() == 1

    # test running the workflow on serial conditions
    w1.run(plugin="MultiProc")
Example #15
def save_wf(input_img, output_dir, it, benchmark, benchmark_dir, work_dir):
    from nipype import Workflow
    from nipype_inc import save_node

    wf = Workflow('save_wf')
    wf.base_dir = work_dir
    idx = 0
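    # One save node per input image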
    for im in input_img:
        sn_name = 'sn_{}'.format(idx)
        sn = save_node(sn_name, im, output_dir, it, benchmark, benchmark_dir)
        wf.add_nodes([sn])
        idx += 1

    wf_out = wf.run('MultiProc')

    node_names = [i.name for i in wf_out.nodes()]
    result_dict = dict(zip(node_names, wf_out.nodes()))
    saved_chunks = ([
        result_dict['sn_{}'.format(i)].result.outputs.output_filename
        for i in range(0, len(input_img))
    ])

    return saved_chunks
Example #16
def test_mapnode_json(tmpdir):
    """Tests that mapnodes don't generate excess jsons
    """
    tmpdir.chdir()
    wd = os.getcwd()
    from nipype import MapNode, Function, Workflow

    def func1(in1):
        return in1 + 1

    n1 = MapNode(Function(input_names=['in1'],
                          output_names=['out'],
                          function=func1),
                 iterfield=['in1'],
                 name='n1')
    n1.inputs.in1 = [1]
    w1 = Workflow(name='test')
    w1.base_dir = wd
    w1.config['execution']['crashdump_dir'] = wd
    w1.add_nodes([n1])
    w1.run()
    n1.inputs.in1 = [2]
    w1.run()
    # should rerun
    n1.inputs.in1 = [1]
    eg = w1.run()

    node = list(eg.nodes())[0]
    outjson = glob(os.path.join(node.output_dir(), '_0x*.json'))
    assert len(outjson) == 1

    # check that multiple json's don't trigger rerun
    with open(os.path.join(node.output_dir(), 'test.json'), 'wt') as fp:
        fp.write('dummy file')
    w1.config['execution'].update(**{'stop_on_first_rerun': True})

    w1.run()
Example #17
def test_mapnode_json(tmpdir):
    """Tests that mapnodes don't generate excess jsons
    """
    tmpdir.chdir()
    wd = os.getcwd()
    from nipype import MapNode, Function, Workflow

    def func1(in1):
        return in1 + 1

    n1 = MapNode(
        Function(input_names=["in1"], output_names=["out"], function=func1),
        iterfield=["in1"],
        name="n1",
    )
    n1.inputs.in1 = [1]
    w1 = Workflow(name="test")
    w1.base_dir = wd
    w1.config["execution"]["crashdump_dir"] = wd
    w1.add_nodes([n1])
    w1.run()
    n1.inputs.in1 = [2]
    w1.run()
    # should rerun
    n1.inputs.in1 = [1]
    eg = w1.run()

    node = list(eg.nodes())[0]
    outjson = glob(os.path.join(node.output_dir(), "_0x*.json"))
    assert len(outjson) == 1

    # check that multiple json's don't trigger rerun
    with open(os.path.join(node.output_dir(), "test.json"), "wt") as fp:
        fp.write("dummy file")
    w1.config["execution"].update(**{"stop_on_first_rerun": True})

    w1.run()
Example #18
def test_mapnode_json(tmpdir):
    """Tests that mapnodes don't generate excess jsons
    """
    tmpdir.chdir()
    wd = os.getcwd()
    from nipype import MapNode, Function, Workflow

    def func1(in1):
        return in1 + 1
    n1 = MapNode(Function(input_names=['in1'],
                          output_names=['out'],
                          function=func1),
                 iterfield=['in1'],
                 name='n1')
    n1.inputs.in1 = [1]
    w1 = Workflow(name='test')
    w1.base_dir = wd
    w1.config['execution']['crashdump_dir'] = wd
    w1.add_nodes([n1])
    w1.run()
    n1.inputs.in1 = [2]
    w1.run()
    # should rerun
    n1.inputs.in1 = [1]
    eg = w1.run()

    node = list(eg.nodes())[0]
    outjson = glob(os.path.join(node.output_dir(), '_0x*.json'))
    assert len(outjson) == 1

    # check that multiple json's don't trigger rerun
    with open(os.path.join(node.output_dir(), 'test.json'), 'wt') as fp:
        fp.write('dummy file')
    w1.config['execution'].update(**{'stop_on_first_rerun': True})

    w1.run()
Example #19
def group_multregress_openfmri(dataset_dir,
                               model_id=None,
                               task_id=None,
                               l1output_dir=None,
                               out_dir=None,
                               no_reversal=False,
                               plugin=None,
                               plugin_args=None,
                               flamemodel='flame1',
                               nonparametric=False,
                               use_spm=False):

    meta_workflow = Workflow(name='mult_regress')
    meta_workflow.base_dir = work_dir
    for task in task_id:
        task_name = get_taskname(dataset_dir, task)
        cope_ids = l1_contrasts_num(model_id, task_name, dataset_dir)
        regressors_needed, contrasts, groups, subj_list = get_sub_vars(
            dataset_dir, task_name, model_id)
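        # One second-level workflow per (task, contrast) pair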
        for idx, contrast in enumerate(contrasts):
            wk = Workflow(name='model_%03d_task_%03d_contrast_%s' %
                          (model_id, task, contrast[0][0]))

            info = Node(util.IdentityInterface(
                fields=['model_id', 'task_id', 'dataset_dir', 'subj_list']),
                        name='infosource')
            info.inputs.model_id = model_id
            info.inputs.task_id = task
            info.inputs.dataset_dir = dataset_dir

            dg = Node(DataGrabber(infields=['model_id', 'task_id', 'cope_id'],
                                  outfields=['copes', 'varcopes']),
                      name='grabber')
            dg.inputs.template = os.path.join(
                l1output_dir,
                'model%03d/task%03d/%s/%scopes/%smni/%scope%02d.nii%s')
            if use_spm:
                dg.inputs.template_args['copes'] = [[
                    'model_id', 'task_id', subj_list, '', 'spm/', '',
                    'cope_id', ''
                ]]
                dg.inputs.template_args['varcopes'] = [[
                    'model_id', 'task_id', subj_list, 'var', 'spm/', 'var',
                    'cope_id', '.gz'
                ]]
            else:
                dg.inputs.template_args['copes'] = [[
                    'model_id', 'task_id', subj_list, '', '', '', 'cope_id',
                    '.gz'
                ]]
                dg.inputs.template_args['varcopes'] = [[
                    'model_id', 'task_id', subj_list, 'var', '', 'var',
                    'cope_id', '.gz'
                ]]
            dg.iterables = ('cope_id', cope_ids)
            dg.inputs.sort_filelist = False

            wk.connect(info, 'model_id', dg, 'model_id')
            wk.connect(info, 'task_id', dg, 'task_id')

            model = Node(MultipleRegressDesign(), name='l2model')
            model.inputs.groups = groups
            model.inputs.contrasts = contrasts[idx]
            model.inputs.regressors = regressors_needed[idx]

            mergecopes = Node(Merge(dimension='t'), name='merge_copes')
            wk.connect(dg, 'copes', mergecopes, 'in_files')

            if flamemodel != 'ols':
                mergevarcopes = Node(Merge(dimension='t'),
                                     name='merge_varcopes')
                wk.connect(dg, 'varcopes', mergevarcopes, 'in_files')

            mask_file = fsl.Info.standard_image(
                'MNI152_T1_2mm_brain_mask.nii.gz')
            flame = Node(FLAMEO(), name='flameo')
            flame.inputs.mask_file = mask_file
            flame.inputs.run_mode = flamemodel
            #flame.inputs.infer_outliers = True

            wk.connect(model, 'design_mat', flame, 'design_file')
            wk.connect(model, 'design_con', flame, 't_con_file')
            wk.connect(mergecopes, 'merged_file', flame, 'cope_file')
            if flamemodel != 'ols':
                wk.connect(mergevarcopes, 'merged_file', flame,
                           'var_cope_file')
            wk.connect(model, 'design_grp', flame, 'cov_split_file')

            if nonparametric:
                palm = Node(Function(input_names=[
                    'cope_file', 'design_file', 'contrast_file', 'group_file',
                    'mask_file', 'cluster_threshold'
                ],
                                     output_names=['palm_outputs'],
                                     function=run_palm),
                            name='palm')
                palm.inputs.cluster_threshold = 3.09
                palm.inputs.mask_file = mask_file
                palm.plugin_args = {
                    'sbatch_args': '-p om_all_nodes -N1 -c2 --mem=10G',
                    'overwrite': True
                }
                wk.connect(model, 'design_mat', palm, 'design_file')
                wk.connect(model, 'design_con', palm, 'contrast_file')
                wk.connect(mergecopes, 'merged_file', palm, 'cope_file')
                wk.connect(model, 'design_grp', palm, 'group_file')

            smoothest = Node(SmoothEstimate(), name='smooth_estimate')
            wk.connect(flame, 'zstats', smoothest, 'zstat_file')
            smoothest.inputs.mask_file = mask_file

            cluster = Node(Cluster(), name='cluster')
            wk.connect(smoothest, 'dlh', cluster, 'dlh')
            wk.connect(smoothest, 'volume', cluster, 'volume')
            cluster.inputs.connectivity = 26
            cluster.inputs.threshold = 2.3
            cluster.inputs.pthreshold = 0.05
            cluster.inputs.out_threshold_file = True
            cluster.inputs.out_index_file = True
            cluster.inputs.out_localmax_txt_file = True

            wk.connect(flame, 'zstats', cluster, 'in_file')

            ztopval = Node(ImageMaths(op_string='-ztop', suffix='_pval'),
                           name='z2pval')
            wk.connect(flame, 'zstats', ztopval, 'in_file')

            sinker = Node(DataSink(), name='sinker')
            sinker.inputs.base_directory = os.path.join(
                out_dir, 'task%03d' % task, contrast[0][0])
            sinker.inputs.substitutions = [('_cope_id', 'contrast'),
                                           ('_maths_', '_reversed_')]

            wk.connect(flame, 'zstats', sinker, 'stats')
            wk.connect(cluster, 'threshold_file', sinker, 'stats.@thr')
            wk.connect(cluster, 'index_file', sinker, 'stats.@index')
            wk.connect(cluster, 'localmax_txt_file', sinker, 'stats.@localmax')
            if nonparametric:
                wk.connect(palm, 'palm_outputs', sinker, 'stats.palm')

            if not no_reversal:
                zstats_reverse = Node(BinaryMaths(), name='zstats_reverse')
                zstats_reverse.inputs.operation = 'mul'
                zstats_reverse.inputs.operand_value = -1
                wk.connect(flame, 'zstats', zstats_reverse, 'in_file')

                cluster2 = cluster.clone(name='cluster2')
                wk.connect(smoothest, 'dlh', cluster2, 'dlh')
                wk.connect(smoothest, 'volume', cluster2, 'volume')
                wk.connect(zstats_reverse, 'out_file', cluster2, 'in_file')

                ztopval2 = ztopval.clone(name='ztopval2')
                wk.connect(zstats_reverse, 'out_file', ztopval2, 'in_file')

                wk.connect(zstats_reverse, 'out_file', sinker, 'stats.@neg')
                wk.connect(cluster2, 'threshold_file', sinker,
                           'stats.@neg_thr')
                wk.connect(cluster2, 'index_file', sinker, 'stats.@neg_index')
                wk.connect(cluster2, 'localmax_txt_file', sinker,
                           'stats.@neg_localmax')
            meta_workflow.add_nodes([wk])
    return meta_workflow
Example #20
def group_multregress_openfmri(dataset_dir, model_id=None, task_id=None, l1output_dir=None, out_dir=None, 
                               no_reversal=False, plugin=None, plugin_args=None, flamemodel='flame1',
                               nonparametric=False, use_spm=False):

    meta_workflow = Workflow(name='mult_regress')
    meta_workflow.base_dir = work_dir
    for task in task_id:
        task_name = get_taskname(dataset_dir, task)
        cope_ids = l1_contrasts_num(model_id, task_name, dataset_dir)
        regressors_needed, contrasts, groups, subj_list = get_sub_vars(dataset_dir, task_name, model_id)
        for idx, contrast in enumerate(contrasts):
            wk = Workflow(name='model_%03d_task_%03d_contrast_%s' % (model_id, task, contrast[0][0]))

            info = Node(util.IdentityInterface(fields=['model_id', 'task_id', 'dataset_dir', 'subj_list']),
                        name='infosource')
            info.inputs.model_id = model_id
            info.inputs.task_id = task
            info.inputs.dataset_dir = dataset_dir
            
            dg = Node(DataGrabber(infields=['model_id', 'task_id', 'cope_id'],
                                  outfields=['copes', 'varcopes']), name='grabber')
            dg.inputs.template = os.path.join(l1output_dir,
                                              'model%03d/task%03d/%s/%scopes/%smni/%scope%02d.nii%s')
            if use_spm:
                dg.inputs.template_args['copes'] = [['model_id', 'task_id', subj_list, '', 'spm/',
                                                     '', 'cope_id', '']]
                dg.inputs.template_args['varcopes'] = [['model_id', 'task_id', subj_list, 'var', 'spm/',
                                                        'var', 'cope_id', '.gz']]
            else:
                dg.inputs.template_args['copes'] = [['model_id', 'task_id', subj_list, '', '', '', 
                                                     'cope_id', '.gz']]
                dg.inputs.template_args['varcopes'] = [['model_id', 'task_id', subj_list, 'var', '',
                                                        'var', 'cope_id', '.gz']]
            dg.iterables=('cope_id', cope_ids)
            dg.inputs.sort_filelist = False

            wk.connect(info, 'model_id', dg, 'model_id')
            wk.connect(info, 'task_id', dg, 'task_id')

            model = Node(MultipleRegressDesign(), name='l2model')
            model.inputs.groups = groups
            model.inputs.contrasts = contrasts[idx]
            model.inputs.regressors = regressors_needed[idx]
            
            mergecopes = Node(Merge(dimension='t'), name='merge_copes')
            wk.connect(dg, 'copes', mergecopes, 'in_files')
            
            if flamemodel != 'ols':
                mergevarcopes = Node(Merge(dimension='t'), name='merge_varcopes')
                wk.connect(dg, 'varcopes', mergevarcopes, 'in_files')
            
            mask_file = fsl.Info.standard_image('MNI152_T1_2mm_brain_mask.nii.gz')
            flame = Node(FLAMEO(), name='flameo')
            flame.inputs.mask_file =  mask_file
            flame.inputs.run_mode = flamemodel
            #flame.inputs.infer_outliers = True

            wk.connect(model, 'design_mat', flame, 'design_file')
            wk.connect(model, 'design_con', flame, 't_con_file')
            wk.connect(mergecopes, 'merged_file', flame, 'cope_file')
            if flamemodel != 'ols':
                wk.connect(mergevarcopes, 'merged_file', flame, 'var_cope_file')
            wk.connect(model, 'design_grp', flame, 'cov_split_file')
            
            if nonparametric:
                palm = Node(Function(input_names=['cope_file', 'design_file', 'contrast_file', 
                                                  'group_file', 'mask_file', 'cluster_threshold'],
                                     output_names=['palm_outputs'],
                                     function=run_palm),
                            name='palm')
                palm.inputs.cluster_threshold = 3.09
                palm.inputs.mask_file = mask_file
                palm.plugin_args = {'sbatch_args': '-p om_all_nodes -N1 -c2 --mem=10G', 'overwrite': True}
                wk.connect(model, 'design_mat', palm, 'design_file')
                wk.connect(model, 'design_con', palm, 'contrast_file')
                wk.connect(mergecopes, 'merged_file', palm, 'cope_file')
                wk.connect(model, 'design_grp', palm, 'group_file')
                
            smoothest = Node(SmoothEstimate(), name='smooth_estimate')
            wk.connect(flame, 'zstats', smoothest, 'zstat_file')
            smoothest.inputs.mask_file = mask_file
        
            cluster = Node(Cluster(), name='cluster')
            wk.connect(smoothest,'dlh', cluster, 'dlh')
            wk.connect(smoothest, 'volume', cluster, 'volume')
            cluster.inputs.connectivity = 26
            cluster.inputs.threshold = 2.3
            cluster.inputs.pthreshold = 0.05
            cluster.inputs.out_threshold_file = True
            cluster.inputs.out_index_file = True
            cluster.inputs.out_localmax_txt_file = True
            
            wk.connect(flame, 'zstats', cluster, 'in_file')
    
            ztopval = Node(ImageMaths(op_string='-ztop', suffix='_pval'),
                           name='z2pval')
            wk.connect(flame, 'zstats', ztopval,'in_file')
            
            sinker = Node(DataSink(), name='sinker')
            sinker.inputs.base_directory = os.path.join(out_dir, 'task%03d' % task, contrast[0][0])
            sinker.inputs.substitutions = [('_cope_id', 'contrast'),
                                           ('_maths_', '_reversed_')]
            
            wk.connect(flame, 'zstats', sinker, 'stats')
            wk.connect(cluster, 'threshold_file', sinker, 'stats.@thr')
            wk.connect(cluster, 'index_file', sinker, 'stats.@index')
            wk.connect(cluster, 'localmax_txt_file', sinker, 'stats.@localmax')
            if nonparametric:
                wk.connect(palm, 'palm_outputs', sinker, 'stats.palm')

            if not no_reversal:
                zstats_reverse = Node( BinaryMaths()  , name='zstats_reverse')
                zstats_reverse.inputs.operation = 'mul'
                zstats_reverse.inputs.operand_value = -1
                wk.connect(flame, 'zstats', zstats_reverse, 'in_file')
                
                cluster2=cluster.clone(name='cluster2')
                wk.connect(smoothest, 'dlh', cluster2, 'dlh')
                wk.connect(smoothest, 'volume', cluster2, 'volume')
                wk.connect(zstats_reverse, 'out_file', cluster2, 'in_file')
                
                ztopval2 = ztopval.clone(name='ztopval2')
                wk.connect(zstats_reverse, 'out_file', ztopval2, 'in_file')
                
                wk.connect(zstats_reverse, 'out_file', sinker, 'stats.@neg')
                wk.connect(cluster2, 'threshold_file', sinker, 'stats.@neg_thr')
                wk.connect(cluster2, 'index_file',sinker, 'stats.@neg_index')
                wk.connect(cluster2, 'localmax_txt_file', sinker, 'stats.@neg_localmax')
            meta_workflow.add_nodes([wk])
    return meta_workflow
Example #21
def main():
    parser = argparse.ArgumentParser(description='BigBrain K-means')
    parser.add_argument('bb_dir',
                        type=str,
                        help='The folder containing '
                        'BigBrain NIfTI images (local fs only)')
    parser.add_argument('iters', type=int, help='The number of iterations')
    parser.add_argument('centroids',
                        type=float,
                        nargs='+',
                        help="cluster centroids")
    parser.add_argument('output_dir',
                        type=str,
                        help='the folder to save '
                        'the final centroids to (local fs only)')
    parser.add_argument('--plugin_args',
                        type=str,
                        help='Plugin configuration file')
    parser.add_argument('--nodes', type=int, help='Number of nodes to use')
    parser.add_argument('--benchmark',
                        action='store_true',
                        help='benchmark pipeline')

    args = parser.parse_args()

    start = time()
    output_dir = os.path.abspath(args.output_dir)

    try:
        os.makedirs(output_dir)
    except Exception as e:
        pass

    benchmark_dir = None
    app_uuid = str(uuid.uuid1())

    if args.benchmark:
        benchmark_dir = os.path.abspath(
            os.path.join(args.output_dir, 'benchmarks-{}'.format(app_uuid)))
        try:
            os.makedirs(benchmark_dir)
        except Exception as e:
            pass

    # get all files in directory
    bb_files = glob.glob(os.path.join(os.path.abspath(args.bb_dir), '*'))
    dtype = iinfo('uint16')

    centroids = list(zip(range(0, len(args.centroids)), args.centroids))

    c_changed = True

    idx = 0
    result_dict = {}

    work_dir = os.path.join(os.getcwd(), 'np_km_work')

    f_per_n = ceil(len(bb_files) / args.nodes)
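    # Split the input files into one partition per requested node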
    file_partitions = [
        bb_files[x:x + f_per_n] for x in range(0, len(bb_files), f_per_n)
    ]

    while c_changed and idx < args.iters:
        wf = Workflow('km_bb1_slurm_{}'.format(idx))
        wf.base_dir = work_dir

        gc_nname = 'gc_slurm_part{}'.format(idx)
        pidx = 0
        for fp in file_partitions:

            gc_nname_it = '{0}-{1}'.format(gc_nname, pidx)
            gc = Node(Function(input_names=[
                'partition', 'centroids', 'work_dir', 'benchmark_dir'
            ],
                               output_names=['assignment_files'],
                               function=nearest_centroid_wf),
                      name=gc_nname_it)

            gc.inputs.partition = fp
            gc.inputs.centroids = centroids
            gc.inputs.work_dir = work_dir
            gc.inputs.benchmark_dir = benchmark_dir

            wf.add_nodes([gc])
            pidx += 1

        if args.plugin_args is not None:
            wf_out = wf.run(plugin='SLURM',
                            plugin_args={'template': args.plugin_args})
        else:
            wf_out = wf.run(plugin='SLURM')

        # Convert to dictionary to more easily extract results
        node_names = [i.name for i in wf_out.nodes()]
        result_dict = dict(zip(node_names, wf_out.nodes()))
        assignments = ([
            result_dict['{0}-{1}'.format(gc_nname,
                                         i)].result.outputs.assignment_files
            for i in range(0, len(file_partitions))
        ])

        gr_nname = 'gr_{}'.format(idx)
        uc_nname = 'uc_{}'.format(idx)

        wf = Workflow('km_bb2_slurm_{}'.format(idx))
        wf.base_dir = work_dir
        for c in centroids:
            gr_nname_it = '{0}-{1}'.format(gr_nname, c[0])
            gr = Node(Function(input_names=['centroid', 'assignments'],
                               output_names=['assignment_files'],
                               function=reduceFilesByCentroid),
                      name=gr_nname_it)
            gr.inputs.centroid = c
            gr.inputs.assignments = assignments

            wf.add_nodes([gr])

            uc_nname_it = '{0}-{1}'.format(uc_nname, c[0])
            uc = Node(Function(input_names=['centroid', 'assignments'],
                               output_names=['updated_centroid'],
                               function=update_centroids),
                      name=uc_nname_it)

            uc.inputs.centroid = c

            wf.connect([(gr, uc, [('assignment_files', 'assignments')])])

        if args.plugin_args is not None:
            wf_out = wf.run(plugin='SLURM',
                            plugin_args={'template': args.plugin_args})
        else:
            wf_out = wf.run(plugin='SLURM')

        # Convert to dictionary to more easily extract results
        node_names = [i.name for i in wf_out.nodes()]
        result_dict = dict(zip(node_names, wf_out.nodes()))

        new_centroids = ([
            result_dict['{0}-{1}'.format(uc_nname,
                                         c[0])].result.outputs.updated_centroid
            for c in centroids
        ])

        old_centroids = set(centroids)
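        # Any change in the centroid set triggers another iteration (up to args.iters)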
        diff = [x for x in new_centroids if x not in old_centroids]
        c_changed = bool(diff)
        centroids = new_centroids

        c_vals = [i[1] for i in centroids]
        idx += 1

        if c_changed and idx < args.iters:
            print("it", idx, c_vals)
        else:
            print("***FINAL CENTROIDS***:", idx, c_vals)

    res_wf = Workflow('km_classify_slurm')
    res_wf.base_dir = work_dir
    c_idx = 0

    assignments = ([
        result_dict['{0}-{1}'.format(gr_nname,
                                     c[0])].result.outputs.assignment_files
        for c in centroids
    ])

    for partition in file_partitions:
        cc = Node(Function(input_names=[
            'partition', 'assignments', 'work_dir', 'output_dir', 'iteration',
            'benchmark_dir'
        ],
                           output_names=['results'],
                           function=save_classified_wf),
                  name='scf_{}'.format(c_idx))
        cc.inputs.partition = partition
        cc.inputs.assignments = assignments
        cc.inputs.work_dir = work_dir
        cc.inputs.output_dir = output_dir
        cc.inputs.iteration = c_idx
        cc.inputs.benchmark_dir = benchmark_dir
        res_wf.add_nodes([cc])
        c_idx += 1

    if args.plugin_args is not None:
        res_wf.run(plugin='SLURM', plugin_args={'template': args.plugin_args})
    else:
        res_wf.run(plugin='SLURM')

    end = time()
    if benchmark_dir is not None:
        write_bench('driver_program', start, end, gethostname(), 'allfiles',
                    get_ident(), benchmark_dir)
Example #22
import os

from nipype import Workflow, Node, Function

def sum(a, b):
    return a + b

wf = Workflow('hello')

adder = Node(Function(input_names=['a', 'b'],
                      output_names=['sum'],
                      function=sum),
             name='a_plus_b')

adder.inputs.a = 1
adder.inputs.b = 3

wf.add_nodes([adder])

wf.base_dir = os.getcwd()

eg = wf.run()
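# The execution graph returned by run() exposes each node's cached result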

list(eg.nodes())[0].result.outputs

def concat(a, b):
    return [a, b]


concater = Node(Function(input_names=['a', 'b'],
                         output_names=['some_list'],
                         function=concat),
                name='concat_a_b')
Example #23
def main():
    parser = argparse.ArgumentParser(description='BigBrain K-means')
    parser.add_argument('bb_dir', type=str, help='The folder containing '
                        'BigBrain NIfTI images (local fs only)')
    parser.add_argument('iters', type=int, help='The number of iterations')
    parser.add_argument('output_dir', type=str, help='the folder to save '
                        'the final centroids to (local fs only)')
    parser.add_argument('--benchmark', action='store_true',
                        help='benchmark pipeline')

    args = parser.parse_args()

    start = time()
    output_dir = os.path.abspath(args.output_dir)

    try:
        os.makedirs(output_dir)
    except Exception as e:
        pass

    benchmark_dir = None
    app_uuid = str(uuid.uuid1())
    
    if args.benchmark:
        benchmark_dir = os.path.abspath(os.path.join(args.output_dir,
                                                        'benchmarks-{}'.format(
                                                                    app_uuid)))
        try:
            os.makedirs(benchmark_dir)
        except Exception as e:
            pass

    # get all files in directory
    bb_files = glob.glob(os.path.join(os.path.abspath(args.bb_dir), '*'))
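    # Fix the random seed so the sampled initial centroids are reproducible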
    seed(2)
    dtype = iinfo('uint16')

    centroids = sample(range(dtype.min, dtype.max), 5)

    centroids = list(zip(range(0, len(centroids)), centroids))

    c_changed = True

    idx = 0
    while c_changed and idx < args.iters:
        wf = Workflow('km_bb{}'.format(idx))

        gc_nname = 'gc_{}'.format(idx)
        gc = MapNode(Function(input_names=['img', 'centroids'],
                              output_names=['assignment_files'],
                              function=get_nearest_centroid),
                     name=gc_nname,
                     iterfield=['img'])

        gc.inputs.img = bb_files
        gc.inputs.centroids = centroids

        wf.add_nodes([gc])

        uc_nname = 'uc_{}'.format(idx)
        uc = MapNode(Function(input_names=['centroid', 'assignments'],
                              output_names=['updated_centroids'],
                              function=update_centroids),
                     name=uc_nname,
                     iterfield=['centroid'])

        uc.inputs.centroid = centroids

        wf.connect([(gc, uc, [('assignment_files', 'assignments')])])

        wf_out = wf.run(plugin='MultiProc')

        # Convert to dictionary to more easily extract results
        node_names = [i.name for i in wf_out.nodes()]
        result_dict = dict(zip(node_names, wf_out.nodes()))

        new_centroids = (result_dict[uc_nname].result
                                              .outputs
                                              .updated_centroids)

        old_centroids = set(centroids)
        diff = [x for x in new_centroids if x not in old_centroids]
        c_changed = bool(diff)
        centroids = new_centroids

        c_vals = [i[1] for i in centroids]
        idx += 1

        if c_changed and idx < args.iters:
            print("it", idx, c_vals)
        else:
            print("***FINAL CENTROIDS***:", c_vals)

            res_wf = Workflow('km_classify')

            c_idx = 0
            for chunk in bb_files:
                cc = Node(Function(input_names=['img', 'assignments'],
                                   output_names=['out_file'],
                                   function=classify_chunks),
                          name='cc_{}'.format(c_idx))

                cc.inputs.img = chunk
                cc.inputs.assignments = (result_dict[gc_nname].result
                                         .outputs
                                         .assignment_files)
                res_wf.add_nodes([cc])

                sc = Node(Function(input_names=['img', 'output_dir'],
                                   output_names=['out_file'],
                                   function=save_classified),
                          name='sc_{}'.format(c_idx))

                sc.inputs.output_dir = output_dir

                res_wf.connect([(cc, sc, [('out_file', 'img')])])

                res_wf.run(plugin='MultiProc')

                c_idx += 1

        end = time()

        if args.benchmark:
            fname = 'benchmark-{}.txt'.format(app_uuid)
            benchmark_file = os.path.abspath(os.path.join(args.output_dir,
                                                          fname))
            print(benchmark_file)

            with open(benchmark_file, 'a+') as bench:
                bench.write('{0} {1} {2} {3} '
                            '{4} {5}\n'.format('driver_program',
                                               start,
                                               end,
                                               socket.gethostname(),
                                               'allfiles',
                                               get_ident()))

                for b in os.listdir(benchmark_dir):
                    with open(os.path.join(benchmark_dir, b), 'r') as f:
                        bench.write(f.read())
                        
            rmtree(benchmark_dir)
Example #24
def main():
    parser = argparse.ArgumentParser(description="BigBrain "
                                     "nipype incrementation")
    parser.add_argument('bb_dir',
                        type=str,
                        help='The folder containing BigBrain NIfTI images '
                        '(local fs only) or image file')
    parser.add_argument('output_dir',
                        type=str,
                        help='the folder to save incremented images to '
                        '(local fs only)')
    parser.add_argument('iterations', type=int, help='number of iterations')
    parser.add_argument('--cli',
                        action='store_true',
                        help='use CLI application')
    parser.add_argument('--work_dir', type=str, help='working directory')
    parser.add_argument('--delay',
                        type=float,
                        default=0,
                        help='task duration time (in s)')
    parser.add_argument('--benchmark',
                        action='store_true',
                        help='benchmark pipeline')
    parser.add_argument('--plugin',
                        type=str,
                        choices=['SLURM', 'MultiProc'],
                        default='MultiProc',
                        help='Plugin to use')
    parser.add_argument('--plugin_args',
                        type=str,
                        help='Plugin arguments file in dictionary format')
    parser.add_argument('--nnodes', type=int, help='Number of nodes available')

    args = parser.parse_args()
    start = time()
    wf = Workflow('npinc_bb')

    if args.work_dir is not None:
        wf.base_dir = os.path.abspath(args.work_dir)

    output_dir = os.path.abspath(args.output_dir)

    try:
        os.makedirs(output_dir)
    except Exception as e:
        pass

    benchmark_dir = None
    app_uuid = str(uuid.uuid1())

    if args.benchmark:
        benchmark_dir = os.path.abspath(
            os.path.join(args.output_dir, 'benchmarks-{}'.format(app_uuid)))
        try:
            os.makedirs(benchmark_dir)
        except Exception as e:
            pass

    #benchmark_dir = None
    #args.benchmark = False
    bb_dir = os.path.abspath(args.bb_dir)

    if os.path.isdir(bb_dir):
        # get all files in directory
        bb_files = glob.glob(os.path.join(os.path.abspath(args.bb_dir), '*'))
    elif os.path.isfile(bb_dir):
        bb_files = [bb_dir]
    else:
        bb_files = bb_dir.split(',')

    if args.plugin == 'SLURM':
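        # Wrap the file list so the single loop iteration below passes all files as one chunk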
        bb_files = [bb_files]

    assert args.iterations > 0

    count = 0
    for chunk in bb_files:
        ca = None
        if args.plugin == 'MultiProc':
            inc_1 = increment_node('inc_bb{}'.format(count), chunk, args.delay,
                                   benchmark_dir, args.benchmark, args.cli)
        else:
            inc_1, ca_1 = computed_avg_node('ca_bb{}'.format(count),
                                            args.nnodes,
                                            args.work_dir,
                                            chunk=chunk,
                                            delay=args.delay,
                                            benchmark_dir=benchmark_dir,
                                            benchmark=args.benchmark,
                                            cli=args.cli)

        wf.add_nodes([inc_1])

        if args.plugin == 'SLURM':
            wf.connect([(inc_1, ca_1, [('inc_chunk', 'chunks')])])

        inc_2 = None

        for i in range(0, args.iterations - 1):
            node_name = 'inc_bb{0}_{1}'.format(count, i + 1)
            ca_2 = None

            if args.plugin == 'MultiProc':
                inc_2 = increment_node(node_name,
                                       delay=args.delay,
                                       benchmark_dir=benchmark_dir,
                                       benchmark=args.benchmark,
                                       cli=args.cli)

            else:
                inc_2, ca_2 = computed_avg_node('ca_bb{0}_{1}'.format(
                    count, i + 1),
                                                args.nnodes,
                                                args.work_dir,
                                                delay=args.delay,
                                                benchmark_dir=benchmark_dir,
                                                benchmark=args.benchmark,
                                                cli=args.cli)

            wf.connect([(inc_1, inc_2, [('inc_chunk', 'chunk')])])

            if args.plugin == 'SLURM':
                wf.connect([(ca_1, inc_2, [('avg_chunk', 'avg')])])

                if i < args.iterations - 2:
                    wf.connect([(inc_2, ca_2, [('inc_chunk', 'chunks')])])

            inc_1 = inc_2
            ca_1 = ca_2

        s_nname = 'save_res{}'.format(count)
        save_res = None
        if args.plugin == 'MultiProc':
            save_res = save_node(s_nname, None, output_dir, args.iterations,
                                 args.benchmark, benchmark_dir)
        else:
            save_res = cluster_save(s_nname, None, output_dir, args.iterations,
                                    args.benchmark, benchmark_dir, args.nnodes,
                                    args.work_dir)

        if inc_2 is None:
            wf.connect([(inc_1, save_res, [('inc_chunk', 'input_img')])])
        else:
            wf.connect([(inc_2, save_res, [('inc_chunk', 'input_img')])])

        count += 1

    if args.plugin_args is not None:
        wf.run(plugin=args.plugin, plugin_args={'template': args.plugin_args})
    else:
        wf.run(plugin=args.plugin)

    wf.write_graph(graph2use='colored')

    end = time()

    if args.benchmark:
        fname = 'benchmark-{}.txt'.format(app_uuid)
        benchmark_file = os.path.abspath(os.path.join(args.output_dir, fname))
        print(benchmark_file)

        with open(benchmark_file, 'a+') as bench:
            bench.write('{0} {1} {2} {3} {4} {5}\n'.format(
                'driver_program', start, end, socket.gethostname(), 'allfiles',
                get_ident()))

            for b in os.listdir(benchmark_dir):
                with open(os.path.join(benchmark_dir, b), 'r') as f:
                    bench.write(f.read())

        shutil.rmtree(benchmark_dir)
Example #25
    def workflow_selector(input_file, ID, atlas_select, network, node_size, mask, thr,
                          parlistfile, multi_nets, conn_model, dens_thresh, conf,
                          adapt_thresh, plot_switch, bedpostx_dir, anat_loc, parc,
                          ref_txt, procmem, dir_path, multi_thr, multi_atlas, max_thr,
                          min_thr, step_thr, k, clust_mask, k_min, k_max, k_step,
                          k_clustering, user_atlas_list, clust_mask_list, prune,
                          node_size_list):
        from pynets import workflows, utils
        from nipype import Node, Workflow, Function

        ##Workflow 1: Whole-brain functional connectome
        if bedpostx_dir is None and network is None:
            sub_func_wf = workflows.wb_functional_connectometry(input_file, ID, atlas_select, network, node_size, mask, thr, parlistfile, conn_model, dens_thresh, conf, plot_switch, parc, ref_txt, procmem, dir_path, multi_thr, multi_atlas, max_thr, min_thr, step_thr, k, clust_mask, k_min, k_max, k_step, k_clustering, user_atlas_list, clust_mask_list, node_size_list)
            sub_struct_wf = None
        ##Workflow 2: RSN functional connectome
        elif bedpostx_dir is None and network is not None:
            sub_func_wf = workflows.rsn_functional_connectometry(input_file, ID, atlas_select, network, node_size, mask, thr, parlistfile, multi_nets, conn_model, dens_thresh, conf, plot_switch, parc, ref_txt, procmem, dir_path, multi_thr, multi_atlas, max_thr, min_thr, step_thr, k, clust_mask, k_min, k_max, k_step, k_clustering, user_atlas_list, clust_mask_list, node_size_list)
            sub_struct_wf = None
        ##Workflow 3: Whole-brain structural connectome
        elif bedpostx_dir is not None and network is None:
            sub_struct_wf = workflows.wb_structural_connectometry(ID, atlas_select, network, node_size, mask, parlistfile, plot_switch, parc, ref_txt, procmem, dir_path, bedpostx_dir, anat_loc, thr, dens_thresh, conn_model)
            sub_func_wf = None
        ##Workflow 4: RSN structural connectome
        elif bedpostx_dir is not None and network is not None:
            sub_struct_wf = workflows.rsn_structural_connectometry(ID, atlas_select, network, node_size, mask, parlistfile, plot_switch, parc, ref_txt, procmem, dir_path, bedpostx_dir, anat_loc, thr, dens_thresh, conn_model)
            sub_func_wf = None
            
        base_wf = sub_func_wf if sub_func_wf else sub_struct_wf

        ##Create meta-workflow to organize graph simulation sets in prep for analysis
        ##Credit: @Mathias Goncalves
        meta_wf = Workflow(name='meta')
        meta_wf.add_nodes([base_wf])
        
        import_list = ['import sys',
                       'import os',
                       'import nibabel as nib',
                       'import numpy as np',
                       'from pynets import utils']
        
        comp_iter = Node(Function(function=utils.compile_iterfields, 
                                  input_names = ['input_file', 'ID', 'atlas_select', 
                                                 'network', 'node_size', 'mask', 'thr', 
                                                 'parlistfile', 'multi_nets', 'conn_model', 
                                                 'dens_thresh', 'dir_path', 'multi_thr', 
                                                 'multi_atlas', 'max_thr', 'min_thr', 'step_thr', 
                                                 'k', 'clust_mask', 'k_min', 'k_max', 'k_step', 
                                                 'k_clustering', 'user_atlas_list', 'clust_mask_list', 
                                                 'prune', 'node_size_list', 'est_path'], 
                                  output_names = ['est_path', 'thr', 'network', 'ID', 'mask', 'conn_model', 
                                                  'k_clustering', 'prune', 'node_size']), 
                         name='compile_iterfields', imports = import_list)
        
        comp_iter.inputs.input_file = input_file
        comp_iter.inputs.ID = ID
        comp_iter.inputs.atlas_select = atlas_select
        comp_iter.inputs.mask = mask
        comp_iter.inputs.parlistfile = parlistfile
        comp_iter.inputs.multi_nets = multi_nets
        comp_iter.inputs.conn_model = conn_model
        comp_iter.inputs.dens_thresh = dens_thresh
        comp_iter.inputs.dir_path = dir_path
        comp_iter.inputs.multi_thr = multi_thr
        comp_iter.inputs.multi_atlas = multi_atlas
        comp_iter.inputs.max_thr = max_thr
        comp_iter.inputs.min_thr = min_thr
        comp_iter.inputs.step_thr = step_thr
        comp_iter.inputs.k = k
        comp_iter.inputs.clust_mask = clust_mask
        comp_iter.inputs.k_min = k_min
        comp_iter.inputs.k_max = k_max
        comp_iter.inputs.k_step = k_step
        comp_iter.inputs.k_clustering = k_clustering
        comp_iter.inputs.user_atlas_list = user_atlas_list
        comp_iter.inputs.clust_mask_list = clust_mask_list
        comp_iter.inputs.prune = prune
        comp_iter.inputs.node_size_list = node_size_list

        meta_wf.connect(base_wf, "outputnode.est_path", comp_iter, "est_path")
        meta_wf.connect(base_wf, "outputnode.thr", comp_iter, "thr")
        meta_wf.connect(base_wf, "outputnode.network", comp_iter, "network")
        meta_wf.connect(base_wf, "outputnode.node_size", comp_iter, "node_size")
        meta_wf.config['logging']['log_directory']='/tmp'
        meta_wf.config['logging']['workflow_level']='DEBUG'
        meta_wf.config['logging']['utils_level']='DEBUG'
        meta_wf.config['logging']['interface_level']='DEBUG'
        meta_wf.write_graph(graph2use='exec', format='png', dotfilename='meta_wf.dot')
        egg = meta_wf.run('MultiProc')
        outputs = [x for x in egg.nodes() if x.name == 'compile_iterfields'][0].result.outputs

        return(outputs.thr, outputs.est_path, outputs.ID, outputs.network, outputs.conn_model, outputs.mask, outputs.prune, outputs.node_size)
Example #26
def create_preproc_workflow(session):
    """
    Defines simple functional preprocessing workflow, including motion
    correction, registration to distortion scans, and unwarping. Assumes
    recon-all has been performed on T1, and computes but does not apply
    registration to anatomy.
    """

    #---Create workflow---
    wf = Workflow(name='workflow', base_dir=session['working_dir'])

    #---EPI Realignment---

    # Realign every TR in each functional run to the sbref image using mcflirt
    realign = MapNode(fsl.MCFLIRT(ref_file=session['sbref'],
                                  save_mats=True,
                                  save_plots=True),
                      iterfield=['in_file'],
                      name='realign')
    realign.inputs.in_file = session['epis']
    wf.add_nodes([realign])

    #---Registration to distortion scan---

    # Register the sbref scan to the distortion scan with the same PE using flirt
    reg2dist = Node(fsl.FLIRT(in_file=session['sbref'],
                              reference=session['distort_PE'],
                              out_file='sbref_reg.nii.gz',
                              out_matrix_file='sbref2dist.mat',
                              dof=6),
                    name='reg2distort')
    wf.add_nodes([reg2dist])

    #---Distortion correction---

    # Merge the two distortion scans for unwarping
    distort_scans = [session['distort_PE'], session['distort_revPE']]
    merge_dist = Node(fsl.Merge(in_files=distort_scans,
                                dimension='t',
                                merged_file='distortion_merged.nii.gz'),
                      name='merge_distort')
    wf.add_nodes([merge_dist])

    # Run topup to estimate warpfield and create unwarped distortion scans
    if '-' not in session['PE_dim']:
        PEs = np.repeat([session['PE_dim'], session['PE_dim'] + '-'], 3)
    else:
        PEs = np.repeat(
            [session['PE_dim'], session['PE_dim'].replace('-', '')], 3)
    unwarp_dist = Node(fsl.TOPUP(encoding_direction=list(PEs),
                                 readout_times=[1, 1, 1, 1, 1, 1],
                                 config='b02b0.cnf',
                                 fwhm=0),
                       name='unwarp_distort')
    wf.connect(merge_dist, 'merged_file', unwarp_dist, 'in_file')

    # Unwarp sbref image in case it's useful
    unwarp_sbref = Node(fsl.ApplyTOPUP(in_index=[1], method='jac'),
                        name='unwarp_sbref')
    wf.connect([(reg2dist, unwarp_sbref, [('out_file', 'in_files')]),
                (unwarp_dist, unwarp_sbref,
                 [('out_enc_file', 'encoding_file'),
                  ('out_fieldcoef', 'in_topup_fieldcoef'),
                  ('out_movpar', 'in_topup_movpar')])])

    #---Registration to anatomy---

    # Create mean unwarped distortion scan
    mean_unwarped_dist = Node(fsl.MeanImage(dimension='T'),
                              name='mean_unwarped_distort')
    wf.connect(unwarp_dist, 'out_corrected', mean_unwarped_dist, 'in_file')

    # Register mean unwarped distortion scan to anatomy using bbregister
    reg2anat = Node(fs.BBRegister(
        subject_id=session['Freesurfer_subject_name'],
        contrast_type='t2',
        init='fsl',
        out_reg_file='distort2anat_tkreg.dat',
        out_fsl_file='distort2anat_flirt.mat'),
                    name='reg2anat')
    wf.connect(mean_unwarped_dist, 'out_file', reg2anat, 'source_file')

    #---Combine and apply transforms to EPIs---

    # Split EPI runs into 3D files
    split_epis = MapNode(fsl.Split(dimension='t'),
                         iterfield=['in_file'],
                         name='split_epis')
    split_epis.inputs.in_file = session['epis']
    wf.add_nodes([split_epis])

    # Combine the rigid transforms to be applied to each EPI volume
    concat_rigids = MapNode(fsl.ConvertXFM(concat_xfm=True),
                            iterfield=['in_file'],
                            nested=True,
                            name='concat_rigids')
    wf.connect([(realign, concat_rigids, [('mat_file', 'in_file')]),
                (reg2dist, concat_rigids, [('out_matrix_file', 'in_file2')])])

    # Apply rigid transforms and warpfield to each EPI volume
    correct_epis = MapNode(fsl.ApplyWarp(interp='spline', relwarp=True),
                           iterfield=['in_file', 'ref_file', 'premat'],
                           nested=True,
                           name='correct_epis')

    get_warp = lambda warpfields: warpfields[0]
    wf.connect([(split_epis, correct_epis, [('out_files', 'in_file'),
                                            ('out_files', 'ref_file')]),
                (concat_rigids, correct_epis, [('out_file', 'premat')]),
                (unwarp_dist, correct_epis, [(('out_warps', get_warp),
                                              'field_file')])])

    # Merge processed files back into 4D nifti
    merge_epis = MapNode(fsl.Merge(dimension='t',
                                   merged_file='timeseries_corrected.nii.gz'),
                         iterfield='in_files',
                         name='merge_epis')
    wf.connect([(correct_epis, merge_epis, [('out_file', 'in_files')])])

    #---Copy important files to main directory---
    substitutions = [('_merge_epis%d/timeseries_corrected.nii.gz' % i, n)
                     for i, n in enumerate(session['out_names'])]
    ds = Node(DataSink(base_directory=os.path.abspath(session['out']),
                       substitutions=substitutions),
              name='outfiles')
    wf.connect(unwarp_dist, 'out_corrected', ds, '@unwarp_dist')
    wf.connect(mean_unwarped_dist, 'out_file', ds, '@mean_unwarped_dist')
    wf.connect(unwarp_sbref, 'out_corrected', ds, '@unwarp_sbref')
    wf.connect(reg2anat, 'out_reg_file', ds, '@reg2anat')
    wf.connect(merge_epis, 'merged_file', ds, '@merge_epis')

    return wf
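A minimal usage sketch for create_preproc_workflow, assuming the nipype, FSL, and FreeSurfer imports this snippet relies on are already in scope; every path, filename, and the 'y' phase-encode axis below is a placeholder, and the session keys are exactly the ones the function reads:

session = {
    'working_dir': '/scratch/preproc_work',       # Workflow base_dir
    'sbref': 'sub-01_sbref.nii.gz',               # single-band reference image
    'epis': ['sub-01_run-1_bold.nii.gz',
             'sub-01_run-2_bold.nii.gz'],         # functional runs to realign
    'distort_PE': 'sub-01_dir-AP_epi.nii.gz',     # distortion scan, same PE
    'distort_revPE': 'sub-01_dir-PA_epi.nii.gz',  # reversed-PE distortion scan
    'PE_dim': 'y',                                # phase-encode axis for topup
    'Freesurfer_subject_name': 'sub-01',          # recon-all subject ID
    'out_names': ['run1_corrected.nii.gz',
                  'run2_corrected.nii.gz'],       # DataSink substitutions
    'out': 'preproc_out',                         # DataSink base directory
}

wf = create_preproc_workflow(session)
wf.run(plugin='MultiProc', plugin_args={'n_procs': 4})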
Example #27
t1_wf = Workflow(name='t1')


def setup_t1_workflow():
    """
    Sets up the workflow for the pipeline variant that includes an intermediate
    transformation to structural (T1) space.
    :return:
    """
    t1_wf.connect(skullstrip_structural_node, 'out_file', flirt_node,
                  'reference')
    t1_wf.connect(flirt_node, 'out_matrix_file', coreg_to_struct_space_node,
                  'in_matrix_file')


accept_input.add_nodes([input_handler_node])

full_process = Workflow(name='full_process')


def setup_full_process(results_directory=config['results_directory'],
                       bias_correction=config['bias_correction'],
                       reg=config['registration'],
                       graphs=config['graphs']):

    full_process.connect(get_transforms,
                         'coreg_to_template_space.output_image',
                         iso_smooth_node, 'in_file')
    full_process.base_dir = results_directory
    full_process.connect(accept_input, 'input_handler.time_series', make_rcfe,
                         'mcflirt.in_file')
Example #28
def create_resting_workflow(args, workdir, outdir):
    if not os.path.exists(args.fsdir):
        raise ValueError('FreeSurfer directory has to exist')

    # remap freesurfer directory to a working directory
    if not os.path.exists(workdir):
        os.makedirs(workdir)

    # create a local subjects dir
    new_subjects_dir = os.path.join(workdir, 'subjects_dir')
    if not os.path.exists(new_subjects_dir):
        os.mkdir(new_subjects_dir)

    # create a link for each freesurfer target
    from glob import glob
    res = CommandLine('which mri_convert').run()
    average_dirs = glob(os.path.join(os.path.dirname(res.runtime.stdout), '..', 'subjects', ('*average*')))

    for dirname in average_dirs:
        dirlink = os.path.join(new_subjects_dir, dirname.split('/')[-1])
        if not os.path.islink(dirlink):
            os.symlink(os.path.realpath(dirname), dirlink)

    meta_wf = Workflow('meta_level')
    subjects_to_analyze = []
    bids_dir = os.path.abspath(args.bids_dir)
    # only for a subset of subjects
    if args.participant_label:
        subjects_to_analyze = ['sub-{}'.format(val) for val in args.participant_label]
    # for all subjects
    else:
        subject_dirs = sorted(glob(os.path.join(bids_dir, "sub-*")))
        subjects_to_analyze = [subject_dir.split("/")[-1] for subject_dir in subject_dirs]

    for subject_label in subjects_to_analyze:
        # create a link to the subject
        subject_link = os.path.join(new_subjects_dir, subject_label)
        orig_dir = os.path.join(os.path.abspath(args.fsdir), subject_label)
        if not os.path.exists(orig_dir):
            continue
        if not os.path.islink(subject_link):
            os.symlink(orig_dir,
                       subject_link)
        from bids.grabbids import BIDSLayout
        layout = BIDSLayout(bids_dir)
        for task in layout.get_tasks():
            TR, slice_times, slice_thickness, files = get_info(bids_dir, subject_label, task)
            name = 'resting_{sub}_{task}'.format(sub=subject_label, task=task)
            kwargs = dict(files=files,
                          target_file=os.path.abspath(args.target_file),
                          subject_id=subject_label,
                          TR=TR,
                          slice_times=slice_times,
                          vol_fwhm=args.vol_fwhm,
                          surf_fwhm=args.surf_fwhm,
                          norm_threshold=2.,
                          subjects_dir=new_subjects_dir,
                          target_subject=args.target_surfs,
                          lowpass_freq=args.lowpass_freq,
                          highpass_freq=args.highpass_freq,
                          sink_directory=os.path.abspath(os.path.join(outdir, subject_label, task)),
                          name=name)
            wf = create_workflow(**kwargs)
            meta_wf.add_nodes([wf])
    return meta_wf
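A hedged sketch of driving create_resting_workflow from an argparse-style namespace; the fields below are only the ones the function reads, and all values are placeholders:

from argparse import Namespace

args = Namespace(fsdir='/data/freesurfer',
                 bids_dir='/data/bids',
                 participant_label=['01'],
                 target_file='/templates/mni152.nii.gz',
                 target_surfs=['fsaverage5'],
                 vol_fwhm=6.0,
                 surf_fwhm=6.0,
                 lowpass_freq=0.1,
                 highpass_freq=0.01)

meta_wf = create_resting_workflow(args, workdir='/scratch/resting_work',
                                  outdir='/data/resting_out')
meta_wf.base_dir = '/scratch/resting_work'
meta_wf.run(plugin='MultiProc', plugin_args={'n_procs': 8})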
Example #29
def test_compute_avg_wf():
    from nipype import Workflow

    nnodes = 1
    work_dir = 'test_ca_wf_work'
    chunk = chunks
    delay = 0
    benchmark_dir = None
    benchmark = False
    cli = True

    wf = Workflow('test_ca_wf')
    wf.base_dir = work_dir

    inc_1, ca_1 = ca.computed_avg_node('ca_bb',
                                       nnodes,
                                       work_dir,
                                       chunk=chunk,
                                       delay=delay,
                                       benchmark_dir=benchmark_dir,
                                       benchmark=benchmark,
                                       cli=cli)

    wf.add_nodes([inc_1])

    wf.connect([(inc_1, ca_1, [('inc_chunk', 'chunks')])])
    nodename = 'inc_2_test'
    inc_2, ca_2 = ca.computed_avg_node(nodename,
                                       nnodes,
                                       work_dir,
                                       delay=delay,
                                       benchmark_dir=benchmark_dir,
                                       benchmark=benchmark,
                                       cli=cli)

    wf.connect([(ca_1, inc_2, [('avg_chunk', 'avg')])])
    wf.connect([(inc_1, inc_2, [('inc_chunk', 'chunk')])])
    wf_out = wf.run('SLURM',
                    plugin_args={
                        'template':
                        'benchmark_scripts/nipype_kmeans_template.sh'
                    })

    node_names = [i.name for i in wf_out.nodes()]
    result_dict = dict(zip(node_names, wf_out.nodes()))
    saved_chunks = (
        result_dict['ca1_{0}'.format(nodename)].result.outputs.inc_chunk)
    avg_file = (
        result_dict['ca2_{0}'.format('ca_bb')].result.outputs.avg_chunk)
    inc1_chunks = (
        result_dict['ca1_{0}'.format('ca_bb')].result.outputs.inc_chunk)

    results = [i for c in saved_chunks for i in c]
    inc1 = [i for c in inc1_chunks for i in c]

    im_1 = nib.load(chunks[0])
    im_3 = nib.load(chunks[1])

    assert np.array_equal(im_3.get_data(), nib.load(chunks[1]).get_data())

    im_1_inc = (im_1.get_data() + 1)
    im_3_inc = (im_3.get_data() + 1)
    nib.save(nib.Nifti1Image(im_1_inc, im_1.affine, im_1.header),
             'test-inc1_1.nii')
    nib.save(nib.Nifti1Image(im_3_inc, im_3.affine, im_3.header),
             'test-inc3_1.nii')

    for i in inc1:
        if 'dummy_1' in i:
            assert np.array_equal(
                nib.load(i).get_data(),
                nib.load('test-inc1_1.nii').get_data())
        else:
            assert np.array_equal(
                nib.load(i).get_data(),
                nib.load('test-inc3_1.nii').get_data())

    avg = None

    for i in [im_1_inc, im_3_inc]:
        if avg is None:
            avg = i.astype(np.float64, casting='safe')
        else:
            avg += i.astype(np.float64, casting='safe')

    avg /= len([im_1_inc, im_3_inc])

    nib.save(nib.Nifti1Image(avg.astype(np.uint16), np.eye(4)), 't_avg.nii')

    assert np.array_equal(
        nib.load(avg_file).get_fdata(),
        nib.load('t_avg.nii').get_fdata())

    im_1_inc_2 = nib.load('test-inc1_1.nii').get_data() + 1
    im_3_inc_2 = nib.load('test-inc3_1.nii').get_data() + 1

    avg = nib.load('t_avg.nii').get_data()
    im_1_ca = (im_1_inc_2 + avg)
    im_3_ca = (im_3_inc_2 + avg)

    nib.save(nib.Nifti1Image(im_1_ca, im_1.affine, im_1.header),
             'test-inc1_2.nii')
    nib.save(nib.Nifti1Image(im_3_ca, im_3.affine, im_3.header),
             'test-inc3_2.nii')

    for i in results:
        assert op.isfile(i)
        ca_res = nib.load(i)
        ca_res = ca_res.get_data().astype(np.uint16)
        if 'inc-dummy_1.nii' in i:
            im = nib.load('test-inc1_2.nii')
            exp = im.get_data().astype(np.uint16)
            assert np.array_equal(ca_res, exp)
        else:
            im = nib.load('test-inc3_2.nii')
            exp = im.get_data().astype(np.uint16)
            assert np.array_equal(ca_res, exp)

    p = subprocess.Popen([
        'python', 'pipelines/spark_inc.py', 'sample_data', 'spca_out', '2',
        '--benchmark', '--computed_avg'
    ],
                         stdin=subprocess.PIPE,
                         stdout=subprocess.PIPE)
    (out, err) = p.communicate()

    h_prog_1 = hashlib.md5(open('spca_out/inc2-dummy_1.nii', 'rb').read()) \
                       .hexdigest()
    h_exp_1 = hashlib.md5(open('test-inc1_2.nii', 'rb')
                           .read()) \
                      .hexdigest()

    h_prog_2 = hashlib.md5(open('spca_out/inc2-dummy_3.nii', 'rb').read()) \
                       .hexdigest()
    h_exp_2 = hashlib.md5(open('test-inc3_2.nii', 'rb')
                           .read()) \
                      .hexdigest()

    assert h_prog_1 == h_exp_1
    assert h_prog_2 == h_exp_2
Example #30
    else:
        from io import StringIO
        data = StringIO(r.content.decode())

    df = pd.read_csv(data)
    max_subjects = df.shape[0]
    if args.num_subjects:
        max_subjects = args.num_subjects
    elif ('CIRCLECI' in os.environ and os.environ['CIRCLECI'] == 'true'):
        max_subjects = 1
    
    meta_wf = Workflow('metaflow')
    count = 0
    for row in df.iterrows():
        wf = create_workflow(row[1].Subject, sink_dir, row[1]['File Path'])
        meta_wf.add_nodes([wf])
        print('Added workflow for: {}'.format(row[1].Subject))
        count = count + 1
        # run this for only one person on CircleCI
        if count >= max_subjects:
            break

    meta_wf.base_dir = work_dir
    meta_wf.config['execution']['remove_unnecessary_files'] = False
    meta_wf.config['execution']['poll_sleep_duration'] = 2
    meta_wf.config['execution']['crashdump_dir'] = work_dir
    if args.plugin_args:
        meta_wf.run(args.plugin, plugin_args=eval(args.plugin_args))
    else:
        meta_wf.run(args.plugin)
Example #31
def nearest_centroid_wf(partition,
                        centroids,
                        work_dir,
                        benchmark_dir=None,
                        tmpfs='/dev/shm'):
    from nipype import Workflow, Node, MapNode, Function
    import nipype_kmeans as nk
    import uuid
    from time import time
    from benchmark import write_bench
    from socket import gethostname
    from os.path import basename, join
    try:
        from threading import get_ident
    except Exception as e:
        from thread import get_ident

    start = time()

    exec_id = uuid.uuid1()
    wf = Workflow('km_bb{}'.format(exec_id))
    wf.base_dir = (join(tmpfs, basename(work_dir))
                   if tmpfs is not None else work_dir)

    gc_nname = 'gc'
    idx = 0

    for chunk in partition:
        gc_nname_it = '{0}-{1}'.format(gc_nname, idx)
        gc = Node(Function(input_names=['img', 'centroids'],
                           output_names=['assignment_files'],
                           function=nk.get_nearest_centroid),
                  name=gc_nname_it)

        gc.inputs.img = chunk
        gc.inputs.centroids = centroids

        wf.add_nodes([gc])
        idx += 1

    wf_out = wf.run('MultiProc')

    node_names = [i.name for i in wf_out.nodes()]
    result_dict = dict(zip(node_names, wf_out.nodes()))

    assignments = ([
        result_dict['{0}-{1}'.format(gc_nname,
                                     i)].result.outputs.assignment_files
        for i in range(0, len(partition))
    ])

    assignments = [t for l in assignments for t in l]

    wf = Workflow('km_lr{}'.format(exec_id))
    wf.base_dir = work_dir

    lr_nname = 'lr'

    for c in centroids:
        lr_nname_it = '{0}-{1}'.format(lr_nname, c[0])
        lr = Node(Function(input_names=['centroid', 'assignments'],
                           output_names=['assignment_files'],
                           function=nk.reduceFilesByCentroid),
                  name=lr_nname_it)
        lr.inputs.centroid = c
        lr.inputs.assignments = assignments

        wf.add_nodes([lr])

    wf_out = wf.run('MultiProc')

    node_names = [i.name for i in wf_out.nodes()]
    result_dict = dict(zip(node_names, wf_out.nodes()))

    assignments = ([
        result_dict['{0}-{1}'.format(lr_nname,
                                     c[0])].result.outputs.assignment_files
        for c in centroids
    ])

    end = time()

    if benchmark_dir is not None:
        write_bench('get_nearest_centroid', start, end, gethostname(),
                    'partition', get_ident(), benchmark_dir)
    return assignments
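A hedged usage sketch for nearest_centroid_wf: judging from how c[0] is used to name nodes above, centroids are assumed to be (label, value) pairs and partition a list of image-chunk paths; everything below is a placeholder:

assignments = nearest_centroid_wf(
    partition=['chunks/chunk_0.nii', 'chunks/chunk_1.nii'],
    centroids=[(0, 10.0), (1, 150.0), (2, 300.0)],
    work_dir='km_work',
    benchmark_dir=None,  # skip write_bench
    tmpfs=None)          # build the first workflow under work_dir, not /dev/shm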
Example #32
        'https://docs.google.com/spreadsheets/d/{key}/export?format=csv&id={key}'
        .format(key=args.key))
    data = r.content

    df = pd.read_csv(StringIO(data))
    max_subjects = df.shape[0]
    if args.num_subjects:
        max_subjects = args.num_subjects
    elif ('CIRCLECI' in os.environ and os.environ['CIRCLECI'] == 'true'):
        max_subjects = 1

    meta_wf = Workflow('metaflow')
    count = 0
    for row in df.iterrows():
        wf = create_workflow(row[1].Subject, sink_dir, row[1]['File Path'])
        meta_wf.add_nodes([wf])
        print('Added workflow for: {}'.format(row[1].Subject))
        count = count + 1
        # run this for only one person on CircleCI
        if count >= max_subjects:
            break

    meta_wf.base_dir = work_dir
    meta_wf.config['execution']['remove_unnecessary_files'] = False
    meta_wf.config['execution']['poll_sleep_duration'] = 2
    meta_wf.config['execution']['crashdump_dir'] = work_dir
    if args.plugin_args:
        meta_wf.run(args.plugin, plugin_args=eval(args.plugin_args))
    else:
        meta_wf.run(args.plugin)
Example #33
File: base.py  Project: heffjos/ecp
def init_bidsify_hcp_wf(
    data_dir,
    work_dir, 
    out_dir,
    subject,
    tasks,
    skip_begin,
    skip_end,
    name='bidsify_hcp_wf',
):
    """
    Creates the bidsify_hcp workflow.

    Parameters
    ----------

    data_dir: str
        data directory holding subject folders
    work_dir: str
        working directory
    out_dir: str
        out directory. Final out directory is out_dir/bidsify_hcp
    subject: str
        subject name; data_dir expected to have a subject directory
    tasks: list (str)
        the task names in HCP format
    skip_begin: list (int)
        this option is intended for pre-upgrade scans. It removes this many
        volumes from the beginning of the HCP movement regressors file, because
        this number of volumes was removed from the scan after preprocessing
        (nifti and cifti match) but not from the movement regressors file.
    skip_end: list (int)
        this option is intended for pre-upgrade scans. It removes this many
        volumes from the end of the HCP movement regressors file, because
        this number of volumes was removed from the scan after preprocessing
        (nifti and cifti match) but not from the movement regressors file.

    Outputs
    -------

    bidsify_hcp_wf: Workflow
        the bidsify_hcp workflow
    """

    DerivativesDataSink = bids.DerivativesDataSink
    DerivativesDataSink.out_path_base = 'bidsify_hcp'

    anat_name_template = os.path.join('anat', f'sub-{subject}_T1.nii.gz')

    bidsify_hcp_wf = Workflow(name=name, base_dir=work_dir)
    
    anat_files = PostFreeSurferFiles(base_dir=data_dir,
                                     subject=subject).run().outputs

    hcp_segment_anat_wf = init_hcp_segment_anat_wf()
    inputnode = hcp_segment_anat_wf.inputs.inputnode
    inputnode.brainmask_fs = anat_files.brainmask_fs
    inputnode.l_atlasroi = anat_files.L_atlasroi_32k_fs_LR
    inputnode.l_midthickness = anat_files.L_midthickness_32k_fs_LR
    inputnode.l_white = anat_files.L_white_32k_fs_LR
    inputnode.l_pial = anat_files.L_pial_32k_fs_LR
    inputnode.r_atlasroi = anat_files.R_atlasroi_32k_fs_LR
    inputnode.r_midthickness = anat_files.R_midthickness_32k_fs_LR
    inputnode.r_white = anat_files.R_white_32k_fs_LR
    inputnode.r_pial = anat_files.R_pial_32k_fs_LR
    inputnode.wmparc = anat_files.wmparc
    inputnode.ROIs = anat_files.subcortical

    ds_csf_mask = Node(DerivativesDataSink(
        base_directory=out_dir, desc='csf', source_file=anat_name_template,
        space='mni', suffix='mask'), name='ds_csf_mask')
    ds_wm_mask = Node(DerivativesDataSink(
        base_directory=out_dir, desc='wm', source_file=anat_name_template,
        space='mni', suffix='mask'), name='ds_wm_mask')
    ds_cortical_gm_mask = Node(DerivativesDataSink(
        base_directory=out_dir, desc='cortgm', source_file=anat_name_template,
        space='mni', suffix='mask'), name='ds_cortical_gm_mask')

    bidsify_hcp_wf.connect([
        (hcp_segment_anat_wf, ds_csf_mask, [('outputnode.csf_mask', 'in_file')]),
        (hcp_segment_anat_wf, ds_wm_mask, [('outputnode.wm_mask', 'in_file')]),
        (hcp_segment_anat_wf, ds_cortical_gm_mask, [('outputnode.cort_gm_mask', 'in_file')]),
    ])

    out_func_dir = os.path.join(out_dir, 
                                DerivativesDataSink.out_path_base,
                                f'sub-{subject}',
                                'func')

    for task, task_skip_begin, task_skip_end in zip(tasks, 
                                                    skip_begin, 
                                                    skip_end):

        out_vol = utils.hcp_to_bids(task, subject)
        entities = utils.get_entities(out_vol)
        out_vol = os.path.join(out_func_dir, out_vol)

        out_cifti = utils.generate_bold_name(
            subject, entities['task'], 'bold', '.dtseries.nii', dir=entities['dir'], 
            run=entities['run'], space='fsLR32k')
        out_cifti = os.path.join(out_func_dir, out_cifti)

        task_vol_files = HcpTaskVolumeFiles(
            mninonlinear=anat_files.mninonlinear,
            subject=subject, 
            task=task).run().outputs

        task_cifti_files = HcpTaskCiftiFiles(
            mninonlinear=anat_files.mninonlinear,
            subject=subject, 
            task=task).run().outputs

        func_name_template = os.path.join('func', utils.hcp_to_bids(task, subject))
        task_wf = Workflow(name=task + '_wf')

        movement = Node(
            GetHcpMovement(hcp_movement=task_vol_files.movement_regressors,
                           skip_begin=int(task_skip_begin),
                           skip_end=int(task_skip_end)),
            name='movement')
        
        link_vol = Node(Function(input_names=['originalfile', 'newfile'],
                                  function=_hardlink), name='link_vol')
        link_vol.inputs.originalfile = task_vol_files.preproc
        link_vol.inputs.newfile = out_vol

        link_cifti = Node(Function(input_names=['originalfile', 'newfile'],
                                    function=_hardlink), name='link_cifti')
        link_cifti.inputs.originalfile = task_cifti_files.preproc
        link_cifti.inputs.newfile = out_cifti

        generate_boldmask = init_generate_boldmask(task_vol_files.preproc)
        ds_boldmask = Node(DerivativesDataSink(
            base_directory=out_dir, 
            desc='confounds',
            source_file=func_name_template, 
            suffix='boldmask'),
            name='ds_boldmask')

        ds_movement = Node(DerivativesDataSink(
            base_directory=out_dir, 
            desc='movpar', 
            source_file=func_name_template, 
            suffix='timeseries'), 
            name='ds_movement')

        task_wf.connect([
            # derivatives
            (generate_boldmask, ds_boldmask, [('outputnode.bold_mask', 'in_file')]),
            (movement, ds_movement, [('movement', 'in_file')]),
        ])
        task_wf.add_nodes([link_vol, link_cifti])

        bidsify_hcp_wf.add_nodes([task_wf])

    return bidsify_hcp_wf
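A hedged call to init_bidsify_hcp_wf; the paths, subject, and task names below are placeholders, and skip_begin / skip_end are given per task as the docstring describes:

bidsify_wf = init_bidsify_hcp_wf(
    data_dir='/data/hcp',
    work_dir='/scratch/bidsify_work',
    out_dir='/data/derivatives',
    subject='100307',
    tasks=['rfMRI_REST1_LR', 'rfMRI_REST1_RL'],
    skip_begin=[0, 0],
    skip_end=[0, 0])
bidsify_wf.run(plugin='MultiProc')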
Example #34
    def run(self):

        wf = Workflow('bapp')
        wf.base_dir = os.getcwd()

        # group analysis can be executed if participant analysis is skipped
        p_analysis = None

        # Participant analysis
        if self.do_participant_analysis:

            participants = Node(Function(input_names=['nip'],
                                         output_names=['out'],
                                         function=get_participants),
                                name='get_participants')
            participants.inputs.nip = self

            p_analysis = MapNode(Function(input_names=[
                'nip', 'analysis_level', 'participant_label', 'working_dir'
            ],
                                          output_names=['result'],
                                          function=run_analysis),
                                 iterfield=['participant_label'],
                                 name='run_participant_analysis')

            wf.add_nodes([participants])
            wf.connect(participants, 'out', p_analysis, 'participant_label')

            p_analysis.inputs.analysis_level = 'participant'
            p_analysis.inputs.nip = self
            p_analysis.inputs.working_dir = os.getcwd()

        # Group analysis
        if self.do_group_analysis:
            groups = Node(Function(input_names=[
                'nip', 'analysis_level', 'working_dir', 'dummy_token'
            ],
                                   output_names=['g_result'],
                                   function=run_analysis),
                          name='run_group_analysis')

            groups.inputs.analysis_level = 'group'
            groups.inputs.nip = self
            groups.inputs.working_dir = os.getcwd()

            if p_analysis is not None:
                wf.connect(p_analysis, 'result', groups, 'dummy_token')
            else:
                wf.add_nodes([groups])

        eg = wf.run()

        # Convert to dictionary to more easily extract results
        node_names = [i.name for i in eg.nodes()]
        result_dict = dict(zip(node_names, eg.nodes()))

        if self.do_participant_analysis:
            for res in result_dict[
                    'run_participant_analysis'].result.outputs.get('result'):
                self.pretty_print(res)

        if self.do_group_analysis:
            self.pretty_print(
                result_dict['run_group_analysis'].result.outputs.g_result)
def main():
    parser = argparse.ArgumentParser(description="BigBrain binarization")

    parser.add_argument(
        'bb_dir',
        type=str,
        help='The folder containing BigBrain NIfTI images (local fs only)')
    parser.add_argument(
        'output_dir',
        type=str,
        help='the folder to save binarized images to (local fs only)')
    parser.add_argument('threshold', type=int, help='binarization threshold')
    parser.add_argument('iterations', type=int, help='number of iterations')
    parser.add_argument('--benchmark',
                        action='store_true',
                        help='benchmark pipeline')

    args = parser.parse_args()

    start = time()
    wf = Workflow('bin_bb')
    wf.base_dir = os.path.dirname(args.output_dir)

    output_dir = os.path.abspath(args.output_dir)
    os.makedirs(output_dir, exist_ok=True)

    # get all files in directory
    bb_files = glob.glob(os.path.join(os.path.abspath(args.bb_dir), '*'))

    #loaded_data = MapNode(Function(input_names=))
    binarized_1 = MapNode(Function(
        input_names=['chunk', 'threshold', 'benchmark', 'start', 'output_dir'],
        output_names=['bin_chunk'],
        function=binarize_chunk),
                          iterfield=['chunk'],
                          name='binarize_bb')

    binarized_1.inputs.chunk = bb_files
    binarized_1.inputs.threshold = args.threshold
    binarized_1.inputs.output_dir = output_dir
    binarized_1.inputs.benchmark = args.benchmark
    binarized_1.inputs.start = start
    wf.add_nodes([binarized_1])

    for i in range(args.iterations - 1):
        node_name = 'binarize_bb{}'.format(i + 1)
        binarized_2 = MapNode(Function(input_names=[
            'chunk', 'threshold', 'benchmark', 'start', 'output_dir'
        ],
                                       output_names=['bin_chunk'],
                                       function=binarize_chunk),
                              iterfield=['chunk'],
                              name=node_name)

        binarized_2.inputs.threshold = args.threshold
        binarized_2.inputs.output_dir = output_dir
        binarized_2.inputs.benchmark = args.benchmark
        binarized_2.inputs.start = start

        wf.connect([(binarized_1, binarized_2, [('bin_chunk', 'chunk')])])

        binarized_1 = binarized_2

    wf.run(plugin='SLURM')
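A hedged example of launching this binarization driver from Python, mirroring the subprocess pattern used earlier in this document; 'nipype_binarize.py' and the argument values are placeholders:

import subprocess

subprocess.run(['python', 'nipype_binarize.py',
                'bb_chunks',     # bb_dir: folder of BigBrain NIfTI chunks
                'bin_out',       # output_dir for the binarized images
                '50',            # threshold
                '3',             # iterations
                '--benchmark'],
               check=True)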