Code Example #1
def test_Commandline_environ(monkeypatch, tmpdir):
    from nipype import config
    config.set_default_config()

    tmpdir.chdir()
    monkeypatch.setitem(os.environ, 'DISPLAY', ':1')
    # Test environment
    ci3 = nib.CommandLine(command='echo')
    res = ci3.run()
    assert res.runtime.environ['DISPLAY'] == ':1'

    # Test display_variable option
    monkeypatch.delitem(os.environ, 'DISPLAY', raising=False)
    config.set('execution', 'display_variable', ':3')
    res = ci3.run()
    assert 'DISPLAY' not in ci3.inputs.environ
    assert 'DISPLAY' not in res.runtime.environ

    # If the interface has _redirect_x then yes, it should be set
    ci3._redirect_x = True
    res = ci3.run()
    assert res.runtime.environ['DISPLAY'] == ':3'

    # Test overwrite
    monkeypatch.setitem(os.environ, 'DISPLAY', ':1')
    ci3.inputs.environ = {'DISPLAY': ':2'}
    res = ci3.run()
    assert res.runtime.environ['DISPLAY'] == ':2'
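
The test above exercises the precedence nipype applies when building a CommandLine child environment: an explicit inputs.environ entry wins, the configured display_variable is injected only for interfaces that redirect X, and otherwise the inherited $DISPLAY is passed through. A minimal sketch of the same calls outside a test harness, assuming nipype is installed (the ':42' value and the MYVAR name are placeholders, not from the original code):

from nipype import config
from nipype.interfaces.base import CommandLine

config.set_default_config()
# Only picked up by interfaces that redirect X (see _redirect_x above).
config.set('execution', 'display_variable', ':42')

ci = CommandLine(command='echo')
ci.inputs.environ = {'MYVAR': '1'}   # merged into the runtime environment
res = ci.run()
print(res.runtime.environ.get('MYVAR'))
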
Code Example #2
File: write_graphs.py  Project: toddt/lyman
def main():

    config.set('logging', 'workflow_level', 'CRITICAL')

    # Find the functions that create workflows
    wf_funcs = [k for k in dir(wf) if re.match("create_.*_workflow", k)]

    for func in wf_funcs:
        try:
            out = getattr(wf, func)()
        except Exception:
            print("ERROR: call to %s failed" % func)
            continue

        # Some of the workflow functions return (flow, inputs, outputs)
        try:
            flow, _, _ = out
        except TypeError:
            flow = out

        # Write the graphs
        name = flow.name
        flow.write_graph("graphs/%s.dot" % name, "orig")

    # Remove the .dot files as they are not of use to us
    files = glob("graphs/*")
    for f in files:
        if f.endswith(".dot"):
            os.remove(f)
Code Example #3
def test_Commandline_environ(monkeypatch, tmpdir):
    from nipype import config

    config.set_default_config()

    tmpdir.chdir()
    monkeypatch.setitem(os.environ, "DISPLAY", ":1")
    # Test environment
    ci3 = nib.CommandLine(command="echo")
    res = ci3.run()
    assert res.runtime.environ["DISPLAY"] == ":1"

    # Test display_variable option
    monkeypatch.delitem(os.environ, "DISPLAY", raising=False)
    config.set("execution", "display_variable", ":3")
    res = ci3.run()
    assert "DISPLAY" not in ci3.inputs.environ
    assert "DISPLAY" not in res.runtime.environ

    # If the interface has _redirect_x then yes, it should be set
    ci3._redirect_x = True
    res = ci3.run()
    assert res.runtime.environ["DISPLAY"] == ":3"

    # Test overwrite
    monkeypatch.setitem(os.environ, "DISPLAY", ":1")
    ci3.inputs.environ = {"DISPLAY": ":2"}
    res = ci3.run()
    assert res.runtime.environ["DISPLAY"] == ":2"
Code Example #4
File: test_core.py  Project: TheChymera/nipype
def test_Commandline_environ(monkeypatch, tmpdir):
    from nipype import config
    config.set_default_config()

    tmpdir.chdir()
    monkeypatch.setitem(os.environ, 'DISPLAY', ':1')
    # Test environment
    ci3 = nib.CommandLine(command='echo')
    res = ci3.run()
    assert res.runtime.environ['DISPLAY'] == ':1'

    # Test display_variable option
    monkeypatch.delitem(os.environ, 'DISPLAY', raising=False)
    config.set('execution', 'display_variable', ':3')
    res = ci3.run()
    assert 'DISPLAY' not in ci3.inputs.environ
    assert 'DISPLAY' not in res.runtime.environ

    # If the interface has _redirect_x then yes, it should be set
    ci3._redirect_x = True
    res = ci3.run()
    assert res.runtime.environ['DISPLAY'] == ':3'

    # Test overwrite
    monkeypatch.setitem(os.environ, 'DISPLAY', ':1')
    ci3.inputs.environ = {'DISPLAY': ':2'}
    res = ci3.run()
    assert res.runtime.environ['DISPLAY'] == ':2'
Code Example #5
def init_logging(workdir):
    """
    Add new logging handler to nipype to output to log directory

    :param workdir: Log directory

    """
    fp = os.path.join(workdir, "pipeline.json")

    with open(fp, "r") as f:
        data = json.load(f)

    images = transpose(data["images"])

    real_output_dir = os.path.join(workdir, "log")

    hdlr = WfHandler(real_output_dir, images)

    from nipype import logging as nlogging
    from nipype import config

    formatter = Formatter(fmt=nlogging.fmt, datefmt=nlogging.datefmt)
    hdlr.setFormatter(formatter)

    config.set("logging", "interface_level", "DEBUG")
    nlogging.update_logging(config)

    nlogging._iflogger.handlers = []
    nlogging._iflogger.propagate = False
    nlogging._iflogger.addHandler(hdlr)

    nlogging._logger.handlers = []
    nlogging._logger.propagate = True
    nlogging._logger.addHandler(hdlr)
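
Example #5 wires a custom handler into nipype by touching the private _iflogger/_logger attributes. A less intrusive sketch (not taken from the project) is to raise the level through the config and then attach the handler to the public logger names that Example #27 below relies on:

import logging as std_logging                  # standard library logging
from nipype import config, logging as nlogging

config.set('logging', 'interface_level', 'DEBUG')
nlogging.update_logging(config)

# 'nipype.interface' / 'nipype.workflow' are the public logger names used in Example #27.
handler = std_logging.FileHandler('interface.log')   # hypothetical output file
nlogging.getLogger('nipype.interface').addHandler(handler)
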
Code Example #6
def main(arglist):

    config.set('logging', 'workflow_level', 'CRITICAL')

    # Find the functions that create workflows
    wf_funcs = [k for k in dir(wf) if re.match("create_.*_workflow", k)]

    for func in wf_funcs:
        try:
            out = getattr(wf, func)()
        except Exception:
            print("ERROR: call to %s failed" % func)
            continue

        # Some of the workflow functions return (flow, inputs, outputs)
        try:
            flow, _, _ = out
        except TypeError:
            flow = out

        # Write the graphs
        name = flow.name
        if arglist:
            if name in arglist:
                flow.write_graph("graphs/%s.dot" % name, "orig", format="svg")
        else:
            flow.write_graph("graphs/%s.dot" % name, "orig", format="svg")

    # Remove the .dot files as they are not of use to us
    files = glob("graphs/*")
    for f in files:
        if f.endswith(".dot"):
            os.remove(f)
Code Example #7
def run(args):
    """Get and process specific information"""
    project = gather_project_info()
    exp = gather_experiment_info(args.experiment, args.model)

    # Subject is always highest level of parameterization
    subject_list = determine_subjects(args.subjects)
    subj_source = make_subject_source(subject_list)

    # Get the full correct name for the experiment
    if args.experiment is None:
        exp_name = project["default_exp"]
    else:
        exp_name = args.experiment

    exp['exp_name'] = exp_name
    exp['model_name'] = args.model if args.model else ''

    # Set roots of output storage
    project['analysis_dir'] = op.join(project["analysis_dir"], exp_name)
    project['working_dir'] = op.join(project["working_dir"], exp_name,
                                     exp['model_name'])

    config.set("execution", "crashdump_dir", project["crash_dir"])
    if args.verbose > 0:
        config.set("logging", "filemanip_level", 'DEBUG')
        config.enable_debug_mode()
        logging.update_logging(config)

    if not op.exists(project['analysis_dir']):
        os.makedirs(project['analysis_dir'])

    workflows_dir = os.path.join(os.environ['FITZ_DIR'], exp['pipeline'],
                                 'workflows')
    if not op.isdir(workflows_dir):
        missing_pipe = 'raise'
        if missing_pipe == 'install':
            install(args)
        else:
            raise IOError("Run `fitz install` to set up your pipeline of "
                          "workflows, %s does not exist." % workflows_dir)
    sys.path.insert(0, workflows_dir)
    for wf_name in args.workflows:
        try:
            mod = imp.find_module(wf_name)
            wf_module = imp.load_module("wf", *mod)
        except (IOError, ImportError):
            print("Could not find any workflows matching %s" % wf_name)
            raise

        params = update_params(wf_module, exp)
        workflow = wf_module.workflow_manager(project, params, args,
                                              subj_source)

        # Run the pipeline
        plugin, plugin_args = determine_engine(args)
        workflow.write_graph(str(workflow) + '.dot', format='svg')
        if not args.dontrun:
            workflow.run(plugin, plugin_args)
Code Example #8
File: frontend.py  Project: kastman/fitz
def run(args):
    """Get and process specific information"""
    project = gather_project_info()
    exp = gather_experiment_info(args.experiment, args.model)

    # Subject is always highest level of parameterization
    subject_list = determine_subjects(args.subjects)
    subj_source = make_subject_source(subject_list)

    # Get the full correct name for the experiment
    if args.experiment is None:
        exp_name = project["default_exp"]
    else:
        exp_name = args.experiment

    exp['exp_name'] = exp_name
    exp['model_name'] = args.model if args.model else ''

    # Set roots of output storage
    project['analysis_dir'] = op.join(project["analysis_dir"], exp_name)
    project['working_dir'] = op.join(project["working_dir"], exp_name,
                                     exp['model_name'])

    config.set("execution", "crashdump_dir", project["crash_dir"])
    if args.verbose > 0:
        config.set("logging", "filemanip_level", 'DEBUG')
        config.enable_debug_mode()
        logging.update_logging(config)

    if not op.exists(project['analysis_dir']):
        os.makedirs(project['analysis_dir'])

    workflows_dir = os.path.join(os.environ['FITZ_DIR'], exp['pipeline'],
                                 'workflows')
    if not op.isdir(workflows_dir):
        missing_pipe = 'raise'
        if missing_pipe == 'install':
            install(args)
        else:
            raise IOError("Run `fitz install` to set up your pipeline of "
                          "workflows, %s does not exist." % workflows_dir)
    sys.path.insert(0, workflows_dir)
    for wf_name in args.workflows:
        try:
            mod = imp.find_module(wf_name)
            wf_module = imp.load_module("wf", *mod)
        except (IOError, ImportError):
            print("Could not find any workflows matching %s" % wf_name)
            raise

        params = update_params(wf_module, exp)
        workflow = wf_module.workflow_manager(
            project, params, args, subj_source)

        # Run the pipeline
        plugin, plugin_args = determine_engine(args)
        workflow.write_graph(str(workflow)+'.dot', format='svg')
        if not args.dontrun:
            workflow.run(plugin, plugin_args)
Code Example #9
File: test_config.py  Project: yaoyang33/nipype
def test_display_config(monkeypatch, dispnum):
    """Check that the display_variable option is used ($DISPLAY not set)"""
    config._display = None
    dispstr = ':%d' % dispnum
    config.set('execution', 'display_variable', dispstr)
    monkeypatch.delitem(os.environ, 'DISPLAY', raising=False)
    assert config.get_display() == config.get('execution', 'display_variable')
    # Test that it was correctly cached
    assert config.get_display() == config.get('execution', 'display_variable')
Code Example #10
def test_display_config_and_system(monkeypatch):
    """Check that when both config and $DISPLAY are defined, the config takes precedence"""
    config._display = None
    dispstr = ':10'
    config.set('execution', 'display_variable', dispstr)
    monkeypatch.setitem(os.environ, 'DISPLAY', ':0')
    assert config.get_display() == dispstr
    # Test that it was correctly cached
    assert config.get_display() == dispstr
Code Example #11
def test_display_config_and_system(monkeypatch):
    """Check that when both config and $DISPLAY are defined, the config
    takes precedence"""
    config._display = None
    dispstr = ":10"
    config.set("execution", "display_variable", dispstr)
    monkeypatch.setenv("DISPLAY", ":0")
    assert config.get_display() == dispstr
    # Test that it was correctly cached
    assert config.get_display() == dispstr
Code Example #12
File: test_provenance.py  Project: mfalkiewicz/nipype
def test_provenance_exists(tmpdir):
    tmpdir.chdir()
    from nipype import config
    from nipype.interfaces.base import CommandLine
    provenance_state = config.get('execution', 'write_provenance')
    hash_state = config.get('execution', 'hash_method')
    config.enable_provenance()
    CommandLine('echo hello').run()
    config.set('execution', 'write_provenance', provenance_state)
    config.set('execution', 'hash_method', hash_state)
    assert tmpdir.join('provenance.provn').check()
Code Example #13
File: test_provenance.py  Project: yzw0041/nipype
def test_provenance_exists(tmpdir):
    tmpdir.chdir()
    from nipype import config
    from nipype.interfaces.base import CommandLine
    provenance_state = config.get('execution', 'write_provenance')
    hash_state = config.get('execution', 'hash_method')
    config.enable_provenance()
    CommandLine('echo hello').run()
    config.set('execution', 'write_provenance', provenance_state)
    config.set('execution', 'hash_method', hash_state)
    assert tmpdir.join('provenance.provn').check()
Code Example #14
def test_provenance_exists(tmpdir):
    tmpdir.chdir()
    from nipype import config
    from nipype.interfaces.base import CommandLine

    provenance_state = config.get("execution", "write_provenance")
    hash_state = config.get("execution", "hash_method")
    config.enable_provenance()
    CommandLine("echo hello").run()
    config.set("execution", "write_provenance", provenance_state)
    config.set("execution", "hash_method", hash_state)
    assert tmpdir.join("provenance.provn").check()
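
The provenance tests above and the debug-mode tests further down all follow the same pattern: read the current value with config.get, override it with config.set, and restore it afterwards. A hedged sketch that packages this as a context manager; temporary_config is a name introduced here for illustration, not part of nipype:

from contextlib import contextmanager
from nipype import config

@contextmanager
def temporary_config(section, option, value):
    """Temporarily override one nipype config option, then restore it."""
    previous = config.get(section, option)
    config.set(section, option, value)
    try:
        yield
    finally:
        config.set(section, option, previous)

# For instance, write provenance for a single run only:
# with temporary_config('execution', 'write_provenance', 'true'):
#     CommandLine('echo hello').run()
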
Code Example #15
File: test_provenance.py  Project: LJWilliams/nipype
def test_provenance_exists(tmpdir):
    tempdir = str(tmpdir)
    os.chdir(tempdir)
    from nipype import config
    from nipype.interfaces.base import CommandLine
    provenance_state = config.get('execution', 'write_provenance')
    hash_state = config.get('execution', 'hash_method')
    config.enable_provenance()
    CommandLine('echo hello').run()
    config.set('execution', 'write_provenance', provenance_state)
    config.set('execution', 'hash_method', hash_state)
    provenance_exists = os.path.exists(os.path.join(tempdir, 'provenance.provn'))
    assert provenance_exists
Code Example #16
def test_cmdline_profiling(tmpdir, mem_gb, n_procs):
    """
    Test runtime profiler correctly records workflow RAM/CPUs consumption
    of a CommandLine-derived interface
    """
    from nipype import config
    config.set('execution', 'resource_monitor_frequency', '0.2')  # Force sampling fast

    tmpdir.chdir()
    iface = UseResources(mem_gb=mem_gb, n_procs=n_procs)
    result = iface.run()

    assert abs(mem_gb - result.runtime.mem_peak_gb) < 0.3, 'estimated memory error above .3GB'
    assert int(result.runtime.cpu_percent / 100 + 0.2) == n_procs, 'wrong number of threads estimated'
Code Example #17
def test_provenance_exists(tmpdir):
    tempdir = str(tmpdir)
    os.chdir(tempdir)
    from nipype import config
    from nipype.interfaces.base import CommandLine
    provenance_state = config.get('execution', 'write_provenance')
    hash_state = config.get('execution', 'hash_method')
    config.enable_provenance()
    CommandLine('echo hello').run()
    config.set('execution', 'write_provenance', provenance_state)
    config.set('execution', 'hash_method', hash_state)
    provenance_exists = os.path.exists(
        os.path.join(tempdir, 'provenance.provn'))
    assert provenance_exists
Code Example #18
def test_cmdline_profiling(tmpdir, mem_gb, n_procs):
    """
    Test runtime profiler correctly records workflow RAM/CPUs consumption
    of a CommandLine-derived interface
    """
    from nipype import config
    config.set('monitoring', 'sample_frequency', '0.2')  # Force sampling fast

    tmpdir.chdir()
    iface = UseResources(mem_gb=mem_gb, n_procs=n_procs)
    result = iface.run()

    assert abs(mem_gb - result.runtime.mem_peak_gb) < 0.3, 'estimated memory error above .3GB'
    assert int(result.runtime.cpu_percent / 100 + 0.2) == n_procs, 'wrong number of threads estimated'
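
This example sets the resource monitor's sampling interval under ('monitoring', 'sample_frequency'), while Example #16 uses ('execution', 'resource_monitor_frequency'); Examples #19 and #20 below show the same split for a Function interface, so the option name presumably depends on the nipype version. A hedged best-effort sketch that tries the newer name first; the broad except is deliberate because the exact error raised for an unknown section may differ between versions:

from nipype import config

def set_sampling_interval(seconds):
    """Best-effort setter for the resource monitor sampling interval."""
    for section, option in (('monitoring', 'sample_frequency'),
                            ('execution', 'resource_monitor_frequency')):
        try:
            config.set(section, option, str(seconds))
            return section, option
        except Exception:  # unknown section/option on this nipype version (assumption)
            continue
    raise RuntimeError('could not configure the resource monitor sampling interval')

# set_sampling_interval(0.2)  # force fast sampling, as the tests do
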
Code Example #19
def test_function_profiling(tmpdir, mem_gb, n_procs):
    """
    Test runtime profiler correctly records workflow RAM/CPUs consumption
    of a Function interface
    """
    from nipype import config
    config.set('execution', 'resource_monitor_frequency', '0.2')  # Force sampling fast

    tmpdir.chdir()
    iface = niu.Function(function=_use_resources)
    iface.inputs.mem_gb = mem_gb
    iface.inputs.n_procs = n_procs
    result = iface.run()

    assert abs(mem_gb - result.runtime.mem_peak_gb) < 0.3, 'estimated memory error above .3GB'
    assert int(result.runtime.cpu_percent / 100 + 0.2) >= n_procs
Code Example #20
def test_function_profiling(tmpdir, mem_gb, n_procs):
    """
    Test runtime profiler correctly records workflow RAM/CPUs consumption
    of a Function interface
    """
    from nipype import config
    config.set('monitoring', 'sample_frequency', '0.2')  # Force sampling fast

    tmpdir.chdir()
    iface = niu.Function(function=_use_resources)
    iface.inputs.mem_gb = mem_gb
    iface.inputs.n_procs = n_procs
    result = iface.run()

    assert abs(mem_gb - result.runtime.mem_peak_gb) < 0.3, 'estimated memory error above .3GB'
    assert int(result.runtime.cpu_percent / 100 + 0.2) >= n_procs
Code Example #21
File: FiberUtils.py  Project: doctoryfx/TractSeg
    def convert_tck_to_trk(filename_in, filename_out, reference_affine, compress_err_thr=0.1, smooth=None):
        '''
        Convert tck file to trk file and compress

        :param filename_in:
        :param filename_out:
        :param compress_err_thr: compress fibers if setting error threshold here (default: 0.1mm)
        :param smooth: smooth streamlines (default: None)
                       10: slight smoothing,  100: very smooth from beginning to end
        :return:
        '''
        #Hide large number of nipype logging outputs
        from nipype import config, logging
        config.set('execution', 'remove_unnecessary_outputs', 'true')
        config.set('logging', 'workflow_level', 'WARNING')
        config.set('logging', 'interface_level', 'WARNING')
        logging.update_logging(config)
        from nipype.interfaces.mrtrix.convert import read_mrtrix_tracks
        from dipy.tracking.metrics import spline

        hdr, streamlines = read_mrtrix_tracks(filename_in, as_generator=False)         # Load Fibers (Tck)

        if smooth is not None:
            streamlines_smooth = []
            for sl in streamlines:
                streamlines_smooth.append(spline(sl, s=smooth))
            streamlines = streamlines_smooth

        #Compressing also good to remove checkerboard artefacts from tracking on peaks
        if compress_err_thr is not None:
            streamlines = FiberUtils.compress_streamlines(streamlines, compress_err_thr)
        FiberUtils.save_streamlines_as_trk(filename_out, streamlines, reference_affine)
Code Example #22
    def convert_tck_to_trk(filename_in,
                           filename_out,
                           reference_affine,
                           compress_err_thr=0.1):
        '''
        Convert tck file to trk file and compress

        :param filename_in:
        :param filename_out:
        :param compress_err_thr: compress fibers if setting error threshold here (default: 0.1mm)
        :return:
        '''
        #Hide large number of nipype logging outputs
        from nipype import config, logging
        config.set('execution', 'remove_unnecessary_outputs', 'true')
        config.set('logging', 'workflow_level', 'WARNING')
        config.set('logging', 'interface_level', 'WARNING')
        logging.update_logging(config)
        from nipype.interfaces.mrtrix.convert import read_mrtrix_tracks

        hdr, streamlines = read_mrtrix_tracks(
            filename_in, as_generator=False)  # Load Fibers (Tck)
        #Compressing also good to remove checkerboard artefacts from tracking on peaks
        if compress_err_thr is not None:
            streamlines = FiberUtils.compress_streamlines(
                streamlines, compress_err_thr)
        FiberUtils.save_streamlines_as_trk(filename_out, streamlines,
                                           reference_affine)
Code Example #23
def create_and_run_p3_workflow(imported_workflows, settings):
    """
        Create main workflow
    """

    # Set nipype debug messages if enabled
    if settings['debug']:
        config.set('logging', 'workflow_level', 'DEBUG')
    # always hash on content
    config.set('execution', 'hash_method', 'content')
    # stop on first crash
    config.set('execution', 'stop_on_first_crash', 'true')
    logging.update_logging(config)

    # define subworkflows from imported workflows
    subworkflows = generate_subworkflows(imported_workflows, settings)

    # create a workflow
    p3 = Workflow(name='p3_pipeline', base_dir=settings['tmp_dir'])

    # get connections
    connections = generate_connections(subworkflows, settings)

    # connect nodes
    p3.connect(connections)

    # apply sideloads
    sideload_nodes(p3, connections, settings)

    # Create graph images
    p3.write_graph(os.path.join(settings['output_dir'], 'graph', 'p3'),
                   graph2use='flat',
                   simple_form=False)
    p3.write_graph(os.path.join(settings['output_dir'], 'graph', 'p3'),
                   graph2use='colored')

    # copy the graph files to the output directory
    # copy2(os.path.join(settings['tmp_dir'],'p3_pipeline','graph.png'),settings['output_dir'])
    # copy2(os.path.join(settings['tmp_dir'],'p3_pipeline','graph_detailed.png'),settings['output_dir'])

    # Run pipeline (check multiproc setting)
    if not settings['disable_run']:
        if settings['multiproc']:
            p3.run(plugin='MultiProc')
        else:
            p3.run()
Code Example #24
    def convert_tck_to_trk(filename_in,
                           filename_out,
                           reference_affine,
                           compress_err_thr=0.1,
                           smooth=None):
        '''
        Convert tck file to trk file and compress

        :param filename_in:
        :param filename_out:
        :param compress_err_thr: compress fibers if setting error threshold here (default: 0.1mm)
        :param smooth: smooth streamlines (default: None)
                       10: slight smoothing,  100: very smooth from beginning to end
        :return:
        '''
        #Hide large number of nipype logging outputs
        from nipype import config, logging
        config.set('execution', 'remove_unnecessary_outputs', 'true')
        config.set('logging', 'workflow_level', 'WARNING')
        config.set('logging', 'interface_level', 'WARNING')
        logging.update_logging(config)
        from nipype.interfaces.mrtrix.convert import read_mrtrix_tracks
        from dipy.tracking.metrics import spline

        hdr, streamlines = read_mrtrix_tracks(
            filename_in, as_generator=False)  # Load Fibers (Tck)

        if smooth is not None:
            streamlines_smooth = []
            for sl in streamlines:
                streamlines_smooth.append(spline(sl, s=smooth))
            streamlines = streamlines_smooth

        #Compressing also good to remove checkerboard artefacts from tracking on peaks
        if compress_err_thr is not None:
            streamlines = FiberUtils.compress_streamlines(
                streamlines, compress_err_thr)
        FiberUtils.save_streamlines_as_trk(filename_out, streamlines,
                                           reference_affine)
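
Examples #21, #22 and #24 all hide nipype's verbose output with the same three config.set calls followed by logging.update_logging. A small reusable sketch of just that part; the quiet_nipype name is introduced here for illustration:

def quiet_nipype(level='WARNING'):
    """Reduce nipype logging chatter, following the pattern in the TractSeg examples."""
    from nipype import config, logging
    config.set('execution', 'remove_unnecessary_outputs', 'true')
    config.set('logging', 'workflow_level', level)
    config.set('logging', 'interface_level', level)
    logging.update_logging(config)

# quiet_nipype()  # call once before building or running workflows
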
Code Example #25
# coding: utf-8

# Get the Node and Workflow object
from nipype import Node, Workflow, pipeline
import nipype.interfaces.utility as util  # utility (Needed?)

# Specify which SPM to use (useful for the SPM8 comparison testing)
from nipype.interfaces.matlab import MatlabCommand as mlabcmd

mlabcmd.set_default_paths('/usr/local/MATLAB/tools/spm12')

# Use nipype's version of collecting and inputting files.
from nipype import SelectFiles, DataSink, config
#config.enable_debug_mode()
# Setting this to false doesn't mean the whole pipeline will run properly, but it can
# run through a couple of times and hopefully hit the stragglers. Sometimes it's because
# the scan doesn't exist, but the template is not flexible enough to catch it.
config.set('execution', 'stop_on_first_crash', 'true')
#config.set('execution', 'keep_inputs', 'true')
#config.set('execution', 'keep_unnecessary_files','true')
config.set('execution', 'hash_method', 'timestamp')
#config.set('execution', 'poll_sleep_duration','3')

import os
import glob
import os.path as op

# For now, hard code some of the paths that I can use to test the pipeline.
home_dir = op.abspath(
    '/data/analysis/brianne/exobk')  # Will get from tkinter eventually
paradigm = 'fp'  # food pics
output_dir = op.abspath(
Code Example #26
from nipype.interfaces.matlab import MatlabCommand
import os
from nipype import config

imports = ['import os',
           'import nibabel as nb',
           'import numpy as np',
           'import scipy as sp',
           ('from nipype.utils.filemanip import filename_to_list, '
               'list_to_filename, split_filename'),
           'from scipy.special import legendre'
           ]


#config.set('execution', 'remove_unnecessary_outputs', 'False')
config.set('execution', 'single_thread_matlab', 'True')

# Specification to MATLAB
MatlabCommand.set_default_matlab_cmd("matlab -nodesktop -nosplash")

fsl.FSLCommand.set_default_output_type('NIFTI')

# Specify Variables
experiment_dir = '/data/eaxfjord/fmriRSWorkingDir/nipype/'
data_dir = os.path.join(experiment_dir, 'data')  # location of the data
working_dir = 'working_dir_PreProc_Final'
output_dir = 'output_dir_PreProc3_Final'


# Tissue probability map
tpm = '/usr/local/matlabtools/2014b/spm12/tpm/TPM.nii'
Code Example #27
def test_debug_mode():
    from ... import logging

    sofc_config = config.get('execution', 'stop_on_first_crash')
    ruo_config = config.get('execution', 'remove_unnecessary_outputs')
    ki_config = config.get('execution', 'keep_inputs')
    wf_config = config.get('logging', 'workflow_level')
    if_config = config.get('logging', 'interface_level')
    ut_config = config.get('logging', 'utils_level')

    wf_level = logging.getLogger('nipype.workflow').level
    if_level = logging.getLogger('nipype.interface').level
    ut_level = logging.getLogger('nipype.utils').level

    config.enable_debug_mode()

    # Check config is updated and logging levels, too
    assert config.get('execution', 'stop_on_first_crash') == 'true'
    assert config.get('execution', 'remove_unnecessary_outputs') == 'false'
    assert config.get('execution', 'keep_inputs') == 'true'
    assert config.get('logging', 'workflow_level') == 'DEBUG'
    assert config.get('logging', 'interface_level') == 'DEBUG'
    assert config.get('logging', 'utils_level') == 'DEBUG'

    assert logging.getLogger('nipype.workflow').level == 10
    assert logging.getLogger('nipype.interface').level == 10
    assert logging.getLogger('nipype.utils').level == 10

    # Restore config and levels
    config.set('execution', 'stop_on_first_crash', sofc_config)
    config.set('execution', 'remove_unnecessary_outputs', ruo_config)
    config.set('execution', 'keep_inputs', ki_config)
    config.set('logging', 'workflow_level', wf_config)
    config.set('logging', 'interface_level', if_config)
    config.set('logging', 'utils_level', ut_config)
    logging.update_logging(config)

    assert config.get('execution', 'stop_on_first_crash') == sofc_config
    assert config.get('execution', 'remove_unnecessary_outputs') == ruo_config
    assert config.get('execution', 'keep_inputs') == ki_config
    assert config.get('logging', 'workflow_level') == wf_config
    assert config.get('logging', 'interface_level') == if_config
    assert config.get('logging', 'utils_level') == ut_config

    assert logging.getLogger('nipype.workflow').level == wf_level
    assert logging.getLogger('nipype.interface').level == if_level
    assert logging.getLogger('nipype.utils').level == ut_level
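
config.enable_debug_mode() flips several execution options and raises the workflow, interface and utils log levels to DEBUG in one call, which is why the test above saves six values before toggling it. A minimal hedged sketch of the same save-and-restore outside a test suite, using only the config.get/config.set/update_logging calls shown above:

from nipype import config, logging

saved = {
    ('execution', 'stop_on_first_crash'): config.get('execution', 'stop_on_first_crash'),
    ('execution', 'remove_unnecessary_outputs'): config.get('execution', 'remove_unnecessary_outputs'),
    ('execution', 'keep_inputs'): config.get('execution', 'keep_inputs'),
    ('logging', 'workflow_level'): config.get('logging', 'workflow_level'),
}

config.enable_debug_mode()
# ... run the workflow under debug settings here ...

for (section, option), value in saved.items():
    config.set(section, option, value)
logging.update_logging(config)
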
Code Example #28
def run_basc_workflow_parallelized(
    subject_file_list, roi_mask_file,
    dataset_bootstraps_list, timeseries_bootstraps_list, n_clusters_list, 
    similarity_metric_list, blocklength_list=[1],
    cluster_method_list=['ward'],

    group_dim_reduce=False, output_size_list=[None],

    affinity_threshold_list=[0.0],

    cross_cluster=False, cross_cluster_mask_file=None, 
    out_dir=None, runs=1, proc_mem=None, random_seed=None,
    analysis_id='basc', cache_method='content'
):
    import os
    import numpy as np
    import nipype.interfaces.io as nio
    import nipype.pipeline.engine as pe
    from nipype import config
    
    config.set('execution', 'keep_inputs', 'true')
    if cache_method == 'content':
        config.set('execution', 'hash_method', 'content')
    else:
        config.set('execution', 'hash_method', 'timestamp')
    
    from PyBASC.pipeline import create_basc_parallelized
    from PyBASC.utils import generate_random_state

    if not out_dir:
        out_dir = os.getcwd()

    analysis_dir = os.path.join(out_dir, analysis_id)

    rng = np.random.RandomState(random_seed)

    for run_id in range(1, runs + 1):

        rng_run = generate_random_state(rng)

        workflow = pe.Workflow(name='pipeline')
        workflow.base_dir = os.path.join(analysis_dir, 'run_%d' % run_id, 'working')

        basc_workflow = create_basc_parallelized(proc_mem, name='basc')

        basc_workflow.inputs.inputspec.set(
            subjects_files=subject_file_list,
            roi_mask_file=roi_mask_file,
            group_dim_reduce=group_dim_reduce,
            cross_cluster=cross_cluster,
            cxc_roi_mask_file=cross_cluster_mask_file,
            random_state_tuple=rng_run.get_state()
        )

        basc_workflow.get_node('inputspec_compression_dim').iterables = [
            ("compression_dim", output_size_list)
        ]
        basc_workflow.get_node('inputspec_boostraps').iterables = [
            ('dataset_bootstraps', dataset_bootstraps_list),
            ('timeseries_bootstraps', timeseries_bootstraps_list),
        ]
        basc_workflow.get_node('inputspec_similarity_metric').iterables = [
            ('similarity_metric', similarity_metric_list)
        ]
        basc_workflow.get_node('inputspec_cluster_method').iterables = [
            ('cluster_method', cluster_method_list)
        ]
        basc_workflow.get_node('inputspec_blocklength').iterables = [
            ('blocklength', blocklength_list)
        ]
        basc_workflow.get_node('inputspec_n_clusters').iterables = [
            ('n_clusters', n_clusters_list)
        ]
        basc_workflow.get_node('inputspec_affinity_threshold').iterables = [
            ('affinity_threshold', affinity_threshold_list)
        ]        

        resource_pool = {}

        resource_pool['group_stability_matrix'] = (basc_workflow, 'outputspec.group_stability_matrix')
        resource_pool['clusters_G'] = (basc_workflow, 'outputspec.clusters_G')
        resource_pool['ism_gsm_corr'] = (basc_workflow, 'outputspec.ism_gsm_corr')
        resource_pool['gsclusters_img'] = (basc_workflow, 'outputspec.gsclusters_img')
        #resource_pool['cluster_voxel_scores_img'] = (basc_workflow, 'outputspec.cluster_voxel_scores_img')
        #resource_pool['cluster_voxel_scores'] = (basc_workflow, 'outputspec.cluster_voxel_scores')
        resource_pool['ind_group_cluster_stability'] = (basc_workflow, 'outputspec.ind_group_cluster_stability')
        resource_pool['individualized_group_clusters'] = (basc_workflow, 'outputspec.individualized_group_clusters')
        resource_pool['ind_group_cluster_labels'] = (basc_workflow, 'outputspec.ind_group_cluster_labels')
        resource_pool['ind_group_cluster_stability_set'] = (basc_workflow, 'outputspec.ind_group_cluster_stability_set')

        ds = pe.Node(nio.DataSink(), name='datasink_workflow_name')
        ds.inputs.base_directory = os.path.join(analysis_dir, 'run_%d' % run_id)
        
        for output in resource_pool.keys():
            node, out_file = resource_pool[output]
            workflow.connect(node, out_file, ds, output)

        
        plugin = 'MultiProc'
        if int(proc_mem[0]) == 1:
            plugin = 'Linear'

        plugin_args = {
            'n_procs': int(proc_mem[0]),
            'memory_gb': int(proc_mem[1])
        }

        # workflow.write_graph(dotfilename='graph.dot', graph2use='exec')
        workflow.run(plugin=plugin, plugin_args=plugin_args)

    return analysis_dir
Code Example #29
import os
from IPython.display import Image

from nipype.interfaces.base import (
    traits,
    TraitedSpec,
    CommandLineInputSpec,
    CommandLine,
    File,
    isdefined
)
from nipype.utils.filemanip import fname_presuffix

from nipype import config

config.set('execution', 'display_variable', os.environ['DISPLAY'])
print(config.get('execution', 'display_variable'))

                        
class MP2RageSkullStripInputSpec(CommandLineInputSpec):
    in_filter_image = traits.File(mandatory=False, argstr='-inFilter %s', desc=' Filter Image')
    in_inv2 = traits.File(exists=True, argstr='-inInv2 %s', desc='Inv2 Image')
    in_t1 = traits.File(exists=True, argstr='-inT1 %s', desc='T1 Map image')
    int_t1_weighted = traits.File(exists=True, argstr='-inT1weighted %s', desc='T1-Weighted Image')
    out_brain_mask = traits.File('brain_mask.nii.gz', usedefault=True, argstr='-outBrain %s', desc='Path/name of brain mask')
    out_masked_t1 = traits.File(argstr='-outMasked %s', desc='Create masked T1')    
    out_masked_t1_weighted = traits.Bool(argstr='-outMasked2 %s', desc='Path/name of masked T1-weighted image')        
    out_masked_filter_image = traits.Bool(argstr='-outMasked3 %s', desc='Path/name of masked Filter image')    
    
class MP2RageSkullStripOutputSpec(TraitedSpec):
    brain_mask = traits.File()
Code Example #30
File: test_config.py  Project: TheChymera/nipype
def test_debug_mode():
    from ... import logging

    sofc_config = config.get('execution', 'stop_on_first_crash')
    ruo_config = config.get('execution', 'remove_unnecessary_outputs')
    ki_config = config.get('execution', 'keep_inputs')
    wf_config = config.get('logging', 'workflow_level')
    if_config = config.get('logging', 'interface_level')
    ut_config = config.get('logging', 'utils_level')

    wf_level = logging.getLogger('nipype.workflow').level
    if_level = logging.getLogger('nipype.interface').level
    ut_level = logging.getLogger('nipype.utils').level

    config.enable_debug_mode()

    # Check config is updated and logging levels, too
    assert config.get('execution', 'stop_on_first_crash') == 'true'
    assert config.get('execution', 'remove_unnecessary_outputs') == 'false'
    assert config.get('execution', 'keep_inputs') == 'true'
    assert config.get('logging', 'workflow_level') == 'DEBUG'
    assert config.get('logging', 'interface_level') == 'DEBUG'
    assert config.get('logging', 'utils_level') == 'DEBUG'

    assert logging.getLogger('nipype.workflow').level == 10
    assert logging.getLogger('nipype.interface').level == 10
    assert logging.getLogger('nipype.utils').level == 10

    # Restore config and levels
    config.set('execution', 'stop_on_first_crash', sofc_config)
    config.set('execution', 'remove_unnecessary_outputs', ruo_config)
    config.set('execution', 'keep_inputs', ki_config)
    config.set('logging', 'workflow_level', wf_config)
    config.set('logging', 'interface_level', if_config)
    config.set('logging', 'utils_level', ut_config)
    logging.update_logging(config)

    assert config.get('execution', 'stop_on_first_crash') == sofc_config
    assert config.get('execution', 'remove_unnecessary_outputs') == ruo_config
    assert config.get('execution', 'keep_inputs') == ki_config
    assert config.get('logging', 'workflow_level') == wf_config
    assert config.get('logging', 'interface_level') == if_config
    assert config.get('logging', 'utils_level') == ut_config

    assert logging.getLogger('nipype.workflow').level == wf_level
    assert logging.getLogger('nipype.interface').level == if_level
    assert logging.getLogger('nipype.utils').level == ut_level
Code Example #31
'''
Create nipype workflow using dipy's workflows
https://github.com/nipy/dipy/blob/master/dipy/workflows/
'''
from nipype import config
config.set('execution', 'remove_unnecessary_outputs', 'false')
#config.set('execution', 'crashfile_format', 'txt')

from nipype import Node, Function, Workflow, DataGrabber, IdentityInterface
from nipype.interfaces.io import SelectFiles, DataSink

import os
from glob import glob

# define inputs
recon = 'csd'
analysis = 'dipy_0.16_workflows_%s'%recon
num_threads = 4
b0_thresh = 80

project_dir = os.path.abspath('/om2/user/ksitek/hcp_7t/')
data_dir = os.path.join(project_dir, 'data')
out_dir = os.path.join(project_dir, 'derivatives', analysis)
work_dir = os.path.join('/om2/scratch/ksitek/hcp/', analysis)

''' define subjects '''
sub_list = [os.path.basename(x) for x in sorted(glob(project_dir+'/data/13*'))]
#sub_list = ['100610'] # test on one subject

''' set up nodes '''
# set up iterables
Code Example #32
File: mcnab_FIR.py  Project: jsalva/gates_analysis
import nipype.interfaces.freesurfer as fs
import nipype.interfaces.utility as util
from nipype import config
from subject_info import *
import time
import os
from fir_utils import *
study_dir = os.path.abspath('/mindhive/gablab/GATES/Analysis/MCNAB/')
data_dir = os.path.abspath('/mindhive/gablab/GATES/data/')
onsets_dir = os.path.join(data_dir, 'onsets/MCNAB/')
subjects_dir = os.path.abspath('/mindhive/xnat/surfaces/GATES/')
os.environ["SUBJECTS_DIR"] = subjects_dir
fs.FSCommand.set_default_subjects_dir(subjects_dir)
mlab.MatlabCommand.set_default_matlab_cmd("/software/matlab_versions/2010b/bin/matlab -nodesktop -nosplash")
mlab.MatlabCommand.set_default_paths('/software/spm8_4290')
config.set('execution','keep_inputs','true')
config.set('execution','remove_unnecessary_outputs','false')


def gen_peak_mask(anat_mask,func_activation):
    import nibabel as nb 
    import numpy as np
    from fir_utils import (binarize_peak, dilate_mask)
    import sys
    import os

    anatmask = nb.load(anat_mask)
    mask_data = np.asarray(anatmask.get_data())
    mask_header = anatmask.get_header()
    mask_affine = anatmask.get_affine()
Code Example #33
File: mcnab_model1.py  Project: jsalva/gates_analysis
import nipype.interfaces.matlab as mlab  # how to run matlab
import nipype.interfaces.spm as spm  # spm
import nipype.interfaces.utility as util  # utility
import nipype.pipeline.engine as pe  # pypeline engine
from nipype.utils.filemanip import loadflat  # some useful stuff for debugging
import scipy.io as sio
import numpy as np
from nipype.interfaces.base import Bunch
from copy import deepcopy
import sys
from nipype import config
from compcor_workflow import create_compcorr, extract_noise_components
from subject_info import info, subject_list


config.set("execution", "keep_inputs", "true")
config.set("execution", "remove_unnecessary_outputs", "false")
##############################################################################
#                               ARGUMENTS
##############################################################################
# perhaps implement with argparse
print(sys.argv)
if not len(sys.argv) == 4:
    sys.stderr.write("The paradigm and analysis level must be provided on the command line")
    sys.exit("The paradigm and analysis level must be provided on the command line")
elif sys.argv[1] in ["WMSTAT", "WM", "Nback_spatial", "Nback_letters", "MCNAB"]:
    # originally 'condition'
    paradigm = sys.argv[1]
    if sys.argv[2] in ["l1", "l2"]:
        levelToRun = sys.argv[2]
    else:
Code Example #34
    parser.add_argument("--work_dir", type=str, required=True)

    parser.add_argument("--out_dir", type=str, required=True)

    parser.add_argument('--debug',
                        dest='debug',
                        action='store_true',
                        help='debug mode')

    args = parser.parse_args()

    if args.debug:
        from nipype import config
        config.enable_debug_mode()
        config.set('execution', 'stop_on_first_crash', 'true')
        config.set('execution', 'remove_unnecessary_outputs', 'false')
        config.set('execution', 'keep_inputs', 'true')
        config.set('logging', 'workflow_level', 'DEBUG')
        config.set('logging', 'interface_level', 'DEBUG')
        config.set('logging', 'utils_level', 'DEBUG')

    wf = create_workflow(xfm_dir=os.path.abspath(args.xfm_dir),
                         xfm_pattern=args.xfm_pattern,
                         atlas_dir=os.path.abspath(args.atlas_dir),
                         atlas_pattern=args.atlas_pattern,
                         source_dir=os.path.abspath(args.source_dir),
                         source_pattern=args.source_pattern,
                         work_dir=os.path.abspath(args.work_dir),
                         out_dir=os.path.abspath(args.out_dir),
                         name=args.name)
Code Example #35
def main(model_dir=MODEL_DIR, subid_label='mrishare_id', design_input='fsgd'):
    '''
    Runs the WF for all the groups found in model dir. 
    '''

    wd = "/beegfs_data/scratch/tsuchida-SBM"
    fs_subdir = '/data/analyses/work_in_progress/freesurfer/fsmrishare-flair6.0/'

    # find the groups in group dirs
    group_csv_glob = glob.glob(op.join(model_dir, '*', 'group_info.csv'))
    print('Found {} group_info.csv...'.format(len(group_csv_glob)))

    if not group_csv_glob:
        raise Exception('No group info found in the model dir')

    for group_info_path in group_csv_glob:
        group_info = pd.read_csv(group_info_path)
        group_name = group_info_path.split('/')[-2]
        group_dir = op.dirname(group_info_path)

        # Copy the models to wd
        wd_indir = op.join(wd, 'input_dir')
        os.makedirs(wd_indir, exist_ok=True)
        group_indir = op.join(wd_indir, group_name)

        sp.call(['rsync', '-avh', '{}/'.format(group_dir), group_indir])

        # get the model name list
        model_dirs = glob.glob(op.join(group_dir, 'Model*/'))

        if not model_dirs:
            print('No Model dirs found for the group {}'.format(group_name))
            break

        model_names = [m.split('/')[-2] for m in sorted(model_dirs)]

        print('Found the following {} models for the group {}'.format(
            len(model_names), group_name))
        print('Gathering contrast info for each model...')

        model_info = {}
        for model_name, model_dir in zip(model_names, model_dirs):
            contrast_files = glob.glob(op.join(model_dir, '*.mtx'))
            if contrast_files:
                contrast_names = [
                    op.basename(f).replace('.mtx', '') for f in contrast_files
                ]
                print('{}: {}'.format(model_name, contrast_names))

                for f, name in zip(contrast_files, contrast_names):
                    sign_file = f.replace('.mtx', '.mdtx')
                    if not op.exists(sign_file):
                        raise Exception(
                            'Could not find corresponding sign file for contrast {}'
                            .format(name))
                    else:
                        model_info[model_name] = 'dods'

        # log dir
        log_dir = op.join(os.getcwd(), 'log_dir', group_name)
        os.makedirs(log_dir, exist_ok=True)

        # WF for the group
        group_sublist = group_info[subid_label].values.tolist()
        wf = genFreesurferSBMglmWF(name='SBM_{}'.format(group_name),
                                   base_dir=wd,
                                   group_sublist=group_sublist,
                                   model_dir=group_indir,
                                   model_info=model_info,
                                   design_input=design_input,
                                   fs_subjects_dir=fs_subdir,
                                   fwhm=[0.0, 10.0],
                                   measure_list=['thickness', 'area'],
                                   target_atlas='fsaverage',
                                   target_atlas_surfreg='sphere.reg',
                                   correction_method='FDR')

        config.update_config(
            {'logging': {
                'log_directory': log_dir,
                'log_to_file': True
            }})
        logging.update_logging(config)
        config.set('execution', 'job_finished_timeout', '20.0')
        config.set('execution', 'keep_inputs', 'true')

        wf.run(
            plugin='SLURM',
            plugin_args={  #'sbatch_args': '--partition=gindev',
                'dont_resubmit_completed_jobs': True,
                'max_jobs': 50
            })
Code Example #36
def test_debug_mode():
    from ... import logging

    sofc_config = config.get("execution", "stop_on_first_crash")
    ruo_config = config.get("execution", "remove_unnecessary_outputs")
    ki_config = config.get("execution", "keep_inputs")
    wf_config = config.get("logging", "workflow_level")
    if_config = config.get("logging", "interface_level")
    ut_config = config.get("logging", "utils_level")

    wf_level = logging.getLogger("nipype.workflow").level
    if_level = logging.getLogger("nipype.interface").level
    ut_level = logging.getLogger("nipype.utils").level

    config.enable_debug_mode()

    # Check config is updated and logging levels, too
    assert config.get("execution", "stop_on_first_crash") == "true"
    assert config.get("execution", "remove_unnecessary_outputs") == "false"
    assert config.get("execution", "keep_inputs") == "true"
    assert config.get("logging", "workflow_level") == "DEBUG"
    assert config.get("logging", "interface_level") == "DEBUG"
    assert config.get("logging", "utils_level") == "DEBUG"

    assert logging.getLogger("nipype.workflow").level == 10
    assert logging.getLogger("nipype.interface").level == 10
    assert logging.getLogger("nipype.utils").level == 10

    # Restore config and levels
    config.set("execution", "stop_on_first_crash", sofc_config)
    config.set("execution", "remove_unnecessary_outputs", ruo_config)
    config.set("execution", "keep_inputs", ki_config)
    config.set("logging", "workflow_level", wf_config)
    config.set("logging", "interface_level", if_config)
    config.set("logging", "utils_level", ut_config)
    logging.update_logging(config)

    assert config.get("execution", "stop_on_first_crash") == sofc_config
    assert config.get("execution", "remove_unnecessary_outputs") == ruo_config
    assert config.get("execution", "keep_inputs") == ki_config
    assert config.get("logging", "workflow_level") == wf_config
    assert config.get("logging", "interface_level") == if_config
    assert config.get("logging", "utils_level") == ut_config

    assert logging.getLogger("nipype.workflow").level == wf_level
    assert logging.getLogger("nipype.interface").level == if_level
    assert logging.getLogger("nipype.utils").level == ut_level
Code Example #37
from sfDM.vis import colormaps
from sfDM.vis.map_maker import MapMaker
import re
from nipype import config

fsl.FSLCommand.set_default_output_type('NIFTI')

#Data set up
config_file = os.environ['fdm_config']
timeline_file = os.environ['fdm_timeline']

with open(config_file, 'r') as f:
    cfg = json.load(f)
parent = cfg['parent_dir']

config.set('execution', 'crashdump_dir', parent)

with open(timeline_file, 'r') as f:
    tmln = json.load(f)

outputdir = parent + '/FDM/Outputs/'

#-----------------------Begin Map Calculations--------------------------------------------------------------------#

fsl.FSLCommand.set_default_output_type('NIFTI')

scan_list = [
    "scan_{0:0>2d}".format(x + 1) for x in range(cfg['number_of_scans'])
]
print(scan_list)
Code Example #38
'''
After creating tractography streamlines with dipy_csd.py,
this workflow takes an atlas file and finds connections
between each region in the atlas
KRS 2018.05.04
'''
from nipype import config
config.set('execution', 'remove_unnecessary_outputs', 'false')
config.set('execution', 'crashfile_format', 'txt')

#config.enable_provenance()

from nipype import Node, Function, Workflow, IdentityInterface
from nipype.interfaces.io import SelectFiles, DataSink

import os
from glob import glob

# which data sampling? also used for naming
resolution = '1.0mm'
out_prefix = 'fathresh-0.1'

data_dir = os.path.abspath('/om2/user/ksitek/exvivo/data')
out_dir = os.path.join('/om2/user/ksitek/exvivo/analysis/dipy_csd/',
                       '%s_%s_anat-atlas/' % (out_prefix, resolution))
sids = ['Reg_S64550']

if not os.path.exists(out_dir):
    os.mkdir(out_dir)

work_dir = os.path.abspath('/om2/scratch/ksitek/dipy_csd/%s_%s/' %
Code Example #39
def run_basc_workflow(
    subject_file_list, roi_mask_file,
    dataset_bootstraps, timeseries_bootstraps, n_clusters, output_size,
    bootstrap_list, proc_mem, similarity_metric, group_dim_reduce=False,
    cross_cluster=False, cross_cluster_mask_file=None, blocklength=1,
    affinity_threshold=0.0, cluster_method='ward', out_dir=None, run=True
):
    
    """Run the 'template_workflow' function to execute the modular workflow
    with the provided inputs.
    :type input_resource: str
    :param input_resource: The filepath of the { input resource }. Can have
                           multiple.
    :type out_dir: str
    :param out_dir: (default: None) The output directory to write the results
                    to; if left as None, will write to the current directory.
    :type run: bool
    :param run: (default: True) Will run the workflow; if set to False, will
                connect the Nipype workflow and return the workflow object
                instead.
    :rtype: str
    :return: (if run=True) The filepath of the generated anatomical_reorient
             file.
    :rtype: Nipype workflow object
    :return: (if run=False) The connected Nipype workflow object.
    :rtype: str
    :return: (if run=False) The base directory of the workflow if it were to
             be run.
    """

    import os
    import glob

    import nipype.interfaces.io as nio
    import nipype.pipeline.engine as pe
    
    from PyBASC.pipeline import create_basc
    from nipype import config

    config.set('execution', 'keep_inputs', 'true')
    workflow = pe.Workflow(name='basc_workflow_runner')

    if not out_dir:
        out_dir = os.getcwd()

    workflow.base_dir = out_dir

    resource_pool = {}

    basc = create_basc(proc_mem, name='basc')
    basc.inputs.inputspec.set(
        subjects_files=subject_file_list,
        roi_mask_file=roi_mask_file,
        dataset_bootstraps=dataset_bootstraps,
        timeseries_bootstraps=timeseries_bootstraps,
        n_clusters=n_clusters,
        compression_dim=output_size,
        bootstrap_list=bootstrap_list,
        similarity_metric=similarity_metric,
        group_dim_reduce=group_dim_reduce,
        cross_cluster=cross_cluster,
        cxc_roi_mask_file=cross_cluster_mask_file,
        blocklength=blocklength,
        affinity_threshold=affinity_threshold,
        cluster_method=cluster_method
    )
    
    resource_pool['group_stability_matrix'] = (basc, 'outputspec.group_stability_matrix')
    resource_pool['clusters_G'] = (basc, 'outputspec.clusters_G')
    resource_pool['ism_gsm_corr'] = (basc, 'outputspec.ism_gsm_corr')
    resource_pool['gsclusters_img'] = (basc, 'outputspec.gsclusters_img')
    #resource_pool['cluster_voxel_scores_img'] = (basc, 'outputspec.cluster_voxel_scores_img')
    #resource_pool['cluster_voxel_scores'] = (basc, 'outputspec.cluster_voxel_scores')
    resource_pool['ind_group_cluster_stability'] = (basc, 'outputspec.ind_group_cluster_stability')
    resource_pool['individualized_group_clusters'] = (basc, 'outputspec.individualized_group_clusters')
    resource_pool['ind_group_cluster_labels'] = (basc, 'outputspec.ind_group_cluster_labels')
    resource_pool['ind_group_cluster_stability_set'] = (basc, 'outputspec.ind_group_cluster_stability_set')


    ds = pe.Node(nio.DataSink(), name='datasink_workflow_name')
    ds.inputs.base_directory = out_dir
    
    for output in resource_pool.keys():
        node, out_file = resource_pool[output]
        workflow.connect(node, out_file, ds, output)


    plugin = 'MultiProc'
    if int(proc_mem[0]) == 1:
        plugin = 'Linear'

    plugin_args = {
        'n_procs': int(proc_mem[0]),
        'memory_gb': int(proc_mem[1])
    }

    workflow.run(plugin=plugin, plugin_args=plugin_args)
    outpath = glob.glob(os.path.join(out_dir, "*", "*"))
    return outpath
Code Example #40
#!/usr/bin/env python

import os
import nipype.interfaces.spm as spm
import nipype.pipeline.engine as pe
from nipype import config, logging

config.set("logging", "filemanip_level", 'DEBUG')
logging.update_logging(config)

realign = pe.Node(spm.Realign(), name='realign')
realign.inputs.in_files = [
    os.path.abspath(p) for p in ['src/bold_run1.img', 'src/bold_run2.img']
]
realign.inputs.jobtype = 'estwrite'
realign.inputs.write_which = [0, 1]

coregister = pe.Node(spm.Coregister(), name='coregister')
coregister.inputs.target = os.path.abspath('src/highres001.img')
coregister.inputs.jobtype = 'estimate'

wf = pe.Workflow(name='spm_preproc')
wf.base_dir = './nipype-workingdir'

wf.connect([
    (realign, coregister,
        [('modified_in_files', 'apply_to_files'),
         ('mean_image', 'source')]),

])
Code Example #41
File: fdm_maps.py  Project: PIRCImagingTools/sfDM
from sfDM.vis.map_maker import MapMaker
import re
from nipype import config

fsl.FSLCommand.set_default_output_type('NIFTI')


#Data set up
config_file = os.environ['fdm_config']
timeline_file = os.environ['fdm_timeline']

with open(config_file, 'r') as f:
    cfg = json.load(f)
parent = cfg['parent_dir']

config.set('execution','crashdump_dir',parent)

with open(timeline_file, 'r') as f:
    tmln = json.load(f)

outputdir=parent+'/FDM/Outputs/'

#-----------------------Begin Map Calculations--------------------------------------------------------------------#

fsl.FSLCommand.set_default_output_type('NIFTI')

scan_list = ["scan_{0:0>2d}".format(x+1) for x in range(cfg['number_of_scans'])]
print(scan_list)


Tumor=[outputdir+'Tumor/'+scan+'/'+(os.listdir(outputdir+'Tumor/'+scan)[0]) for scan in scan_list]
Code Example #42
File: exec.py  Project: atsuch/nighres
input_dir = sys.argv[3]
sub_file = sys.argv[4]
atlas = sys.argv[5]
grid = sys.argv[6]

subjects = pickle.load(open(sub_file, "rb"))

wf = Lesion_extractor(
    wf_name=wf_name,
    base_dir=base_dir,
    input_dir=input_dir,
    subjects=subjects,
    #main=main,
    #acc=acc,
    atlas=atlas)

config.update_config(
    {'logging': {
        'log_directory': wf.base_dir,
        'log_to_file': True
    }})
logging.update_logging(config)
config.set('execution', 'job_finished_timeout', '20.0')
wf.config['execution'] = {'job_finished_timeout': '10.0'}
try:
    wf.run('SLURM', plugin_args={'sbatch_args': '-p ' + grid})
    #wf.write_graph(dotfilename='WAIMEA', graph2use='colored', format='png', simple_form=True)
except:
    print('Error! Pipeline exited with exception:')
    raise
Code Example #43
def test_PathReferenceTracer_indirect_refs(tmp_path):
    from nipype import config
    import nipype.interfaces.utility as niu
    import nipype.pipeline.engine as pe

    os.chdir(str(tmp_path))

    config.set_default_config()
    config.set("execution", "remove_unnecessary_outputs", False)

    wf = pe.Workflow("w", base_dir=Path.cwd())

    x = pe.Node(interface=niu.Function(function=totxt,
                                       input_names=["a", "b"],
                                       output_names=["c", "d"]),
                name="x")
    x.inputs.a = 1
    x.inputs.b = 2

    y = pe.Node(interface=niu.Function(function=select,
                                       input_names=["a", "b"],
                                       output_names=["c"]),
                name="y")
    wf.connect(x, "c", y, "a")
    wf.connect(x, "d", y, "b")

    z = pe.Node(interface=niu.Function(function=select,
                                       input_names=["a", "b"],
                                       output_names=["c"]),
                name="z")
    wf.connect(y, "c", z, "a")
    wf.connect(y, "c", z, "b")

    execgraph = wf.run(plugin=DontRunRunner())

    def get_node(name):
        for node in execgraph.nodes():
            if node.name == name:
                return node

    x = get_node("x")
    y = get_node("y")
    z = get_node("z")

    rt = PathReferenceTracer()

    for node in execgraph.nodes:
        rt.add_node(node)
    for node in execgraph.nodes:
        rt.set_node_pending(node)

    xrf = rt.node_resultfile_path(x)
    yrf = rt.node_resultfile_path(y)
    zrf = rt.node_resultfile_path(z)

    result = x.run()
    rt.set_node_complete(x, True)

    c = result.outputs.c
    d = result.outputs.d

    assert rt.refs[xrf] == set([yrf])
    assert rt.refs[yrf] == set([zrf])
    assert rt.refs[zrf] == set([])
    assert rt.refs[c] == set([xrf])
    assert rt.refs[d] == set([xrf])

    assert rt.deps[xrf] == set([xrf.parent, c, d])
    assert rt.deps[yrf] == set([yrf.parent, xrf])
    assert rt.deps[zrf] == set([zrf.parent, yrf])
    assert rt.deps[c] == set([xrf.parent])
    assert rt.deps[d] == set([xrf.parent])

    y.run()
    rt.set_node_complete(y, True)

    assert rt.refs[xrf] == set([])
    assert rt.refs[yrf] == set([zrf])
    assert rt.refs[zrf] == set([])
    assert rt.refs[c] == set([xrf])
    assert rt.refs[d] == set([xrf, yrf])

    assert rt.deps[xrf] == set([xrf.parent, c, d])
    assert rt.deps[yrf] == set([yrf.parent, d])
    assert rt.deps[zrf] == set([zrf.parent, yrf])
    assert rt.deps[c] == set([xrf.parent])
    assert rt.deps[d] == set([xrf.parent])

    rtc = set(rt.collect())
    assert rtc == set([xrf, c])
Code Example #44
        if not isdefined(self.inputs.out_file):
            outputs["resized_file"] = self._gen_filename(self.inputs.in_file)
        else:
            outputs["resized_file"] = os.path.abspath(self.inputs.out_file)
        return outputs

    def _gen_filename(self, name):
        pth, fn, ext = filemanip.split_filename(self.inputs.in_file)
        return os.path.join(os.getcwd(), fn + "_resized" + ext)


from nipype.utils.filemanip import fname_presuffix

from nipype import config

config.set("execution", "display_variable", os.environ["DISPLAY"])
print(config.get("execution", "display_variable"))


class MP2RageSkullStripInputSpec(CommandLineInputSpec):
    in_filter_image = traits.File(mandatory=False, argstr="-inFilter %s", desc=" Filter Image")
    in_inv2 = traits.File(exists=True, argstr="-inInv2 %s", desc="Inv2 Image")
    in_t1 = traits.File(exists=True, argstr="-inT1 %s", desc="T1 Map image")
    in_t1_weighted = traits.File(exists=True, argstr="-inT1weighted %s", desc="T1-Weighted Image")
    out_brain_mask = traits.File(
        "brain_mask.nii.gz", usedefault=True, argstr="-outBrain %s", desc="Path/name of brain mask"
    )
    out_masked_t1 = traits.Bool(True, usedefault=True, argstr="-outMasked %s", desc="Create masked T1")
    out_masked_t1_weighted = traits.Bool(
        True, usedefault=True, argstr="-outMasked2 %s", desc="Path/name of masked T1-weighted image"
    )