Example #1
def aseg_to_tissuemaps(aseg):
    from nipype import config, logging
    config.enable_debug_mode()
    logging.update_logging(config)
    from nibabel import load, save, Nifti1Image
    from numpy import zeros_like
    from os.path import abspath
    aseg_nifti = load(aseg)
    aseg_data = aseg_nifti.get_data()
    cortical_labels = [3, 42]
    subcortical_labels = [8, 10, 11, 12, 13, 17, 18, 26, 47, 49, 50, 51, 52, 53, 54, 58]

    #create binary masks: start from arrays of zeros and set voxels whose aseg label
    #matches the cortical (then subcortical) label lists to 1
    cortical_data = zeros_like(aseg_data)
    for x in cortical_labels:
        cortical_data[aseg_data == x] = 1
    cortical_nifti = Nifti1Image(cortical_data, aseg_nifti.affine)
    
    subcort_data = zeros_like(aseg_data) 
    for x in subcortical_labels:
        subcort_data[aseg_data == x] = 1
    subcort_nifti = Nifti1Image(subcort_data, aseg_nifti.affine)
    
    save(subcort_nifti, "subcortical_gm.nii.gz")
    save(cortical_nifti, "cortical_gm.nii.gz")
    subcort_file = abspath("subcortical_gm.nii.gz")
    cortical_file = abspath("cortical_gm.nii.gz")
    gm_list = [subcort_file, cortical_file]
    return(gm_list)
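The helper above returns absolute paths, so it can be dropped straight into a pipeline. A minimal sketch, assuming hypothetical node name and input path, of wrapping it as a nipype Function node:

from nipype.pipeline.engine import Node
from nipype.interfaces.utility import Function

# hypothetical node wrapping aseg_to_tissuemaps; the aseg path is a placeholder
tissuemaps = Node(Function(input_names=['aseg'],
                           output_names=['gm_list'],
                           function=aseg_to_tissuemaps),
                  name='aseg_to_tissuemaps')
tissuemaps.inputs.aseg = '/path/to/aparc+aseg.nii.gz'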
def check_mask_coverage(epi,brainmask):
    from os.path import abspath
    from nipype import config, logging
    config.enable_debug_mode()
    logging.update_logging(config)
    from nilearn import plotting
    from nipype.interfaces.nipy.preprocess import Trim

    trim = Trim()
    trim.inputs.in_file = epi
    trim.inputs.end_index = 1
    trim.inputs.out_file = 'epi_vol1.nii.gz'
    trim.run()
    epi_vol = abspath('epi_vol1.nii.gz')

    maskcheck_filename='maskcheck.png'
    display = plotting.plot_anat(epi_vol, display_mode='ortho',
                                 draw_cross=False,
                                 title = 'brainmask coverage')
    display.add_contours(brainmask,levels=[.5], colors='r')
    display.savefig(maskcheck_filename)
    display.close()
    maskcheck_file = abspath(maskcheck_filename)

    return(maskcheck_file)
Example #3
def make3DTemplate(subject_T1s, num_proc, output_prefix):
    from nipype import config, logging
    config.enable_debug_mode()
    logging.update_logging(config)
    
    from os.path import abspath, split
    from os import getcwd
    from shutil import copyfile
    from glob import glob
    from subprocess import call

    curr_dir = getcwd()

    #copy T1s into current directory
    for T in range(0,len(subject_T1s)):
        [dirname,filename] = split(subject_T1s[T])
        copyfile(subject_T1s[T],curr_dir + '/S' + str(T)+'_'+filename)

    # -c flag is control for local computing (2 = use localhost; required for -j flag)
    # -j flag is for number of processors allowed
    # the flags must be plain ASCII hyphens; the wildcard is expanded in Python
    # because subprocess.call() does not go through a shell
    call(['antsMultivariateTemplateConstruction2.sh', '-d', '3', '-o', output_prefix,
          '-r', '1', '-c', '2', '-j', str(num_proc)] + sorted(glob('*.nii.gz')))
    
    sample_template = abspath(output_prefix + 'template0.nii.gz')
    
    return(sample_template)
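A minimal direct-call sketch for the template helper above (the T1 paths and output prefix are hypothetical, and antsMultivariateTemplateConstruction2.sh must be on the PATH):

t1_list = ['/data/sub-01_T1w.nii.gz', '/data/sub-02_T1w.nii.gz']  # hypothetical inputs
template = make3DTemplate(t1_list, num_proc=4, output_prefix='ANTS_')
# returns the absolute path to ANTS_template0.nii.gz in the working directory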
Example #4
def adjust_masks(masks):
    from os.path import abspath
    from nipype import config, logging
    config.enable_debug_mode()
    logging.update_logging(config)

    from nipype.interfaces.freesurfer.model import Binarize
    #pve0 = csf, pve1 = gm, pve2 = wm

    origvols = sorted(masks)
    csf = origvols[0]
    wm = origvols[2]

    erode = Binarize()
    erode.inputs.in_file = wm
    erode.inputs.erode = 1
    erode.inputs.min = 0.5
    erode.inputs.max = 1000
    erode.inputs.binary_file = 'WM_seg.nii'
    erode.run()

    wm_new = abspath(erode.inputs.binary_file)

    vols = []
    vols.append(wm_new)
    vols.append(csf)

    return (vols)
def sort_pes(pes):
    from nipype import config, logging
    from nipype.interfaces.fsl import Merge
    from os.path import abspath
    config.enable_debug_mode()
    logging.update_logging(config)

    print(pes)
    pe1s = []
    pe0s = []
    for file in pes:
        if 'pe0' in file:
            pe0s.append(file)
        elif 'pe1' in file:
            pe1s.append(file)

    pe1s = sorted(pe1s)
    pe0s = sorted(pe0s)

    me = Merge()
    merged_pes = []

    for i in range(0,len(pe1s)):
        num=pe1s[i][-12:-11]
        me.inputs.in_files = [pe1s[i],pe0s[i]]
        me.inputs.dimension='t'
        me.inputs.merged_file = 'merged_pes%s.nii.gz' % num
        me.run()
        file = abspath('merged_pes%s.nii.gz' % num)
        merged_pes.append(file)

    return(merged_pes)
Example #6
def run(args):
    """Get and process specific information"""
    project = gather_project_info()
    exp = gather_experiment_info(args.experiment, args.model)

    # Subject is always highest level of parameterization
    subject_list = determine_subjects(args.subjects)
    subj_source = make_subject_source(subject_list)

    # Get the full correct name for the experiment
    if args.experiment is None:
        exp_name = project["default_exp"]
    else:
        exp_name = args.experiment

    exp['exp_name'] = exp_name
    exp['model_name'] = args.model if args.model else ''

    # Set roots of output storage
    project['analysis_dir'] = op.join(project["analysis_dir"], exp_name)
    project['working_dir'] = op.join(project["working_dir"], exp_name,
                                     exp['model_name'])

    config.set("execution", "crashdump_dir", project["crash_dir"])
    if args.verbose > 0:
        config.set("logging", "filemanip_level", 'DEBUG')
        config.enable_debug_mode()
        logging.update_logging(config)

    if not op.exists(project['analysis_dir']):
        os.makedirs(project['analysis_dir'])

    workflows_dir = os.path.join(os.environ['FITZ_DIR'], exp['pipeline'],
                                 'workflows')
    if not op.isdir(workflows_dir):
        missing_pipe = 'raise'
        if missing_pipe == 'install':
            install(args)
        else:
            raise IOError("Run `fitz install` to set up your pipeline of "
                          "workflows, %s does not exist." % workflows_dir)
    sys.path.insert(0, workflows_dir)
    for wf_name in args.workflows:
        try:
            mod = imp.find_module(wf_name)
            wf_module = imp.load_module("wf", *mod)
        except (IOError, ImportError):
            print "Could not find any workflows matching %s" % wf_name
            raise

        params = update_params(wf_module, exp)
        workflow = wf_module.workflow_manager(project, params, args,
                                              subj_source)

        # Run the pipeline
        plugin, plugin_args = determine_engine(args)
        workflow.write_graph(str(workflow) + '.dot', format='svg')
        if not args.dontrun:
            workflow.run(plugin, plugin_args)
Example #7
def run(args):
    """Get and process specific information"""
    project = gather_project_info()
    exp = gather_experiment_info(args.experiment, args.model)

    # Subject is always highest level of parameterization
    subject_list = determine_subjects(args.subjects)
    subj_source = make_subject_source(subject_list)

    # Get the full correct name for the experiment
    if args.experiment is None:
        exp_name = project["default_exp"]
    else:
        exp_name = args.experiment

    exp['exp_name'] = exp_name
    exp['model_name'] = args.model if args.model else ''

    # Set roots of output storage
    project['analysis_dir'] = op.join(project["analysis_dir"], exp_name)
    project['working_dir'] = op.join(project["working_dir"], exp_name,
                                     exp['model_name'])

    config.set("execution", "crashdump_dir", project["crash_dir"])
    if args.verbose > 0:
        config.set("logging", "filemanip_level", 'DEBUG')
        config.enable_debug_mode()
        logging.update_logging(config)

    if not op.exists(project['analysis_dir']):
        os.makedirs(project['analysis_dir'])

    workflows_dir = os.path.join(os.environ['FITZ_DIR'], exp['pipeline'],
                                 'workflows')
    if not op.isdir(workflows_dir):
        missing_pipe = 'raise'
        if missing_pipe == 'install':
            install(args)
        else:
            raise IOError("Run `fitz install` to set up your pipeline of "
                          "workflows, %s does not exist." % workflows_dir)
    sys.path.insert(0, workflows_dir)
    for wf_name in args.workflows:
        try:
            mod = imp.find_module(wf_name)
            wf_module = imp.load_module("wf", *mod)
        except (IOError, ImportError):
            print "Could not find any workflows matching %s" % wf_name
            raise

        params = update_params(wf_module, exp)
        workflow = wf_module.workflow_manager(
            project, params, args, subj_source)

        # Run the pipeline
        plugin, plugin_args = determine_engine(args)
        workflow.write_graph(str(workflow)+'.dot', format='svg')
        if not args.dontrun:
            workflow.run(plugin, plugin_args)
Example #8
def setup_environment(argv):
    print("Configuring environment...")
    import os
    import os.path
    from BAW.utilities.configFileParser import resolveDataSinkOption, parseFile
    from BAW.utilities.pathHandling import validatePath
    from BAW.utilities import misc
    from collections import OrderedDict  # Need OrderedDict internally to ensure consistent ordering
    environment, experiment, pipeline, cluster = parseFile(
        argv["--ExperimentConfig"], argv["--pe"], argv["--workphase"])
    pipeline['ds_overwrite'] = resolveDataSinkOption(argv, pipeline)
    if cluster is None:
        print("Running on local")
        # raise NotImplementedError("Running local has old code and has not been tested!")
        # assert argv["--wfrun"] in argvWFRUN, \
        #    "wfrun  options for clusters can only be given when the configuration file's CLUSTER option == True"
        # os.environ['NSLOTS'] = str(misc.get_cpus(argv["--wf_template_runner"]))
    else:
        load_modules(cluster['modules'])  # Load modules if not already done  ## MODS PATH
        # print os.environ['LOADEDMODULES']
        # if environment['virtualenv_dir'] is not None:  # MODS PATH
        # activate_this = validatePath(
        #    os.path.join(environment['virtualenv_dir'], 'bin', 'activate_this.py'), False, False)
        # if os.path.exists( activate_this ) :
        #    exec(open(activate_this).read(), OrderedDict(__file__=activate_this))
    utilities_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'utilities')
    configure_env = validatePath(os.path.join(utilities_path, 'configure_env.py'), False, False)
    # Add the AutoWorkup directory to the PYTHONPATH every time - REQUIRED FOR CLUSTER DISPATCHING
    environment['env']['PYTHONPATH'] = environment['env']['PYTHONPATH'] + ":" + os.path.dirname(__file__)

    exec(open(configure_env).read(), OrderedDict(__file__=__file__,
                                          append_os_path=environment['env']['PATH'],
                                          append_sys_path=environment['env']['PYTHONPATH'])
         )  # MODS PATH

    print(("@" * 80))
    print((environment['env']['PYTHONPATH']))
    print(("@" * 80))
    print((environment['env']['PATH']))
    print(("@" * 80))

    from nipype import config
    config.enable_debug_mode()
    # config.enable_provenance()

    from BAW.utilities.package_check import verify_packages
    verify_packages()
    if 'FREESURFER' in experiment['components']:  # FREESURFER MODS
        configure_FS = validatePath(os.path.join(utilities_path, 'utilities', 'configure_FS.py'), False, False)
        exec(open(configure_FS).read(), OrderedDict(FS_VARS=misc.FS_VARS, env=environment['env']))
        print("FREESURFER needs to check for sane environment here!")  # TODO: raise warning, write method, what???
    for key, value in list(environment['env'].items()):
        if key in ['PATH', 'PYTHONPATH'] + misc.FS_VARS:
            pass
        else:
            os.environ[key] = value  # Do not use os.putenv (see Python documentation)
    return environment, experiment, pipeline, cluster
Example #9
def setup(argv):
    print "Configuring environment..."
    import os, os.path
    from utilities.configFileParser import resolveDataSinkOption, parseFile
    from utilities.pathHandling import validatePath
    from utilities import misc
    environment, experiment, pipeline, cluster = parseFile(
        argv["--ExperimentConfig"], argv["--pe"])
    pipeline['ds_overwrite'] = resolveDataSinkOption(argv, pipeline)

    if cluster is None:
        assert argv["--wfrun"] in misc.WFRUN, \
          "wfrun options for clusters can only be given when the configuration file's CLUSTER option == True"
        os.environ['NSLOTS'] = str(misc.get_cpus(argv["--wfrun"]))
    else:
        load_modules(cluster['modules']
                     )  # Load modules if not already done  ## MODS PATH
        # print os.environ['LOADEDMODULES']
    if environment['virtualenv_dir']:  ## MODS PATH
        activate_this = validatePath(
            os.path.join(environment['virtualenv_dir'], 'bin',
                         'activate_this.py'), False, False)
        execfile(activate_this, dict(__file__=activate_this))
    utilities_path = os.path.join(os.path.dirname(os.path.abspath(__file__)),
                                  'utilities')
    configure_env = validatePath(
        os.path.join(utilities_path, 'configure_env.py'), False, False)
    # Add the AutoWorkup directory to the PYTHONPATH every time - REQUIRED FOR CLUSTER DISPATCHING
    environment['env']['PYTHONPATH'] = environment['env'][
        'PYTHONPATH'] + ":" + os.path.dirname(__file__)
    execfile(
        configure_env,
        dict(__file__=__file__,
             append_os_path=environment['env']['PATH'],
             append_sys_path=environment['env']['PYTHONPATH']))  ## MODS PATH
    from nipype import config
    config.enable_debug_mode()
    from utilities.package_check import verify_packages
    verify_packages()
    if 'FREESURFER' in experiment['components']:  ## FREESURFER MODS
        configure_FS = validatePath(
            os.path.join(utilities_path, 'utilities', 'configure_FS.py'),
            False, False)
        execfile(configure_FS,
                 dict(FS_VARS=misc.FS_VARS, env=environment['env']))
        print "FREESURFER needs to check for sane environment here!"  # TODO: raise warning, write method, what???
    for key, value in environment['env'].items():
        if key in ['PATH', 'PYTHONPATH'] + misc.FS_VARS:
            pass
        else:
            os.environ[
                key] = value  # Do not use os.putenv (see Python documentation)
    return environment, experiment, pipeline, cluster
def combine_masks(mask1,mask2):
    from nipype.interfaces.fsl.utils import Merge
    from os.path import abspath
    from nipype import config, logging
    config.enable_debug_mode()
    logging.update_logging(config)

    vols = []
    vols.append(mask1)
    vols.append(mask2)

    return(vols)
Example #11
def extract_fisherZ(subj_betas, clusters, cluster_table):
    from os import path
    from numpy import genfromtxt, savetxt
    from nipype import config, logging
    config.enable_debug_mode()
    logging.update_logging(config)
    
    # read the cluster table, skipping its header row (the original referenced the
    # undefined name clusters_table here)
    cluster_data = genfromtxt(cluster_table, delimiter='\t', dtype=None, skip_header=1)

    # the per-cluster Fisher-Z extraction is not implemented in the original snippet
    # (it passed undefined names to savetxt); as a placeholder, write the parsed
    # cluster table back out and return its absolute path
    table_filename = 'fisherZ_table.txt'
    savetxt(table_filename, cluster_data, delimiter='\t', fmt='%s')
    table_path = path.abspath(table_filename)

    return(table_path)
Example #12
def run_examples(example, pipelines, data_path, plugin=None, rm_base_dir=True):
    from nipype import config
    from nipype.interfaces.base import CommandLine

    if plugin is None:
        plugin = 'MultiProc'

    print('running example: %s with plugin: %s' % (example, plugin))
    config.enable_debug_mode()
    config.enable_provenance()
    CommandLine.set_default_terminal_output("stream")

    plugin_args = {}
    if plugin == 'MultiProc':
        plugin_args['n_procs'] = int(
            os.getenv('NIPYPE_NUMBER_OF_CPUS', cpu_count()))

    module = import_module('.' + example, 'niflow.nipype1.examples')
    for pipeline in pipelines:
        wf = getattr(module, pipeline)
        wf.base_dir = os.path.join(os.getcwd(), 'output', example, plugin)

        results_dir = os.path.join(wf.base_dir, wf.name)
        if rm_base_dir and os.path.exists(results_dir):
            rmtree(results_dir)

        # Handle a logging directory
        log_dir = os.path.join(os.getcwd(), 'logs', example)
        if not os.path.exists(log_dir):
            os.makedirs(log_dir)
        wf.config = {
            'execution': {
                'hash_method': 'timestamp',
                'stop_on_first_rerun': 'true',
                'write_provenance': 'true',
                'poll_sleep_duration': 2
            },
            'logging': {
                'log_directory': log_dir,
                'log_to_file': True
            }
        }
        try:
            wf.inputs.inputnode.in_data = os.path.abspath(data_path)
        except AttributeError:
            pass  # the workflow does not have inputnode.in_data

        wf.run(plugin=plugin, plugin_args=plugin_args)
        # run twice to check if nothing is rerunning
        wf.run(plugin=plugin)
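A hedged usage sketch for the runner above (the example and pipeline names are assumptions, and the niflow-nipype1-examples package plus the example dataset must be available):

# hypothetical invocation: run one example workflow serially against a local dataset
run_examples('fmri_fsl_reuse', ['level1_workflow'],
             data_path='/data/ds000114', plugin='Linear')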
Example #13
def run_examples(example, pipelines, data_path, plugin=None, rm_base_dir=True):
    from nipype import config
    from nipype.interfaces.base import CommandLine

    if plugin is None:
        plugin = 'MultiProc'

    print('running example: %s with plugin: %s' % (example, plugin))
    config.enable_debug_mode()
    config.enable_provenance()
    CommandLine.set_default_terminal_output("stream")

    plugin_args = {}
    if plugin == 'MultiProc':
        plugin_args['n_procs'] = int(
            os.getenv('NIPYPE_NUMBER_OF_CPUS', cpu_count()))

    __import__(example)
    for pipeline in pipelines:
        wf = getattr(sys.modules[example], pipeline)
        wf.base_dir = os.path.join(os.getcwd(), 'output', example, plugin)

        results_dir = os.path.join(wf.base_dir, wf.name)
        if rm_base_dir and os.path.exists(results_dir):
            rmtree(results_dir)

        # Handle a logging directory
        log_dir = os.path.join(os.getcwd(), 'logs', example)
        if not os.path.exists(log_dir):
            os.makedirs(log_dir)
        wf.config = {
            'execution': {
                'hash_method': 'timestamp',
                'stop_on_first_rerun': 'true',
                'write_provenance': 'true',
                'poll_sleep_duration': 2
            },
            'logging': {
                'log_directory': log_dir,
                'log_to_file': True
            }
        }
        try:
            wf.inputs.inputnode.in_data = os.path.abspath(data_path)
        except AttributeError:
            pass  # the workflow does not have inputnode.in_data

        wf.run(plugin=plugin, plugin_args=plugin_args)
        # run twice to check if nothing is rerunning
        wf.run(plugin=plugin)
def convertafni(in_file):
    from nipype.interfaces.afni.utils import AFNItoNIFTI
    from os import path
    from nipype import config, logging
    config.enable_debug_mode()
    logging.update_logging(config)

    cvt = AFNItoNIFTI()
    cvt.inputs.in_file = in_file
    cvt.inputs.out_file = 'func_filtered.nii.gz'
    cvt.run()

    out_file = path.abspath('func_filtered.nii.gz')
    return(out_file)
Example #15
def relabel_fast(fast_tissue_list):
    from nipype import config, logging
    from os.path import split, join
    from os import rename
    config.enable_debug_mode()
    logging.update_logging(config)
    tissue_list = sorted(fast_tissue_list)
    csf = tissue_list[0]
    wm = tissue_list[2]
    [wd, csf_file] = split(csf)
    [wd, wm_file] = split(wm)
    # use join() so the renamed files stay in their original directory (the source
    # concatenated directory and filename without a path separator)
    rename(csf, join(wd, 'csf.nii.gz'))
    rename(wm, join(wd, 'wm.nii.gz'))
    wm_csf = [join(wd, 'csf.nii.gz'), join(wd, 'wm.nii.gz')]
    return(wm_csf)
Example #16
def determine_clusters(clusters_table, min_clust_size):
    from os import path
    from numpy import genfromtxt
    from nipype import config, logging
    config.enable_debug_mode()
    logging.update_logging(config)
    
    clusters = genfromtxt(clusters_table, delimiter='\t', dtype=None, skip_header=1)
    clusters_to_extract = []
    
    # keep the index (column 0) of every cluster whose size (column 1) meets the
    # threshold; the original indexed the array with its own rows and returned the
    # undefined name cluster_index
    for row in clusters:
        if row[1] >= min_clust_size:
            clusters_to_extract.append(row[0])

    return(clusters_to_extract)
def test_debug_mode():
    from ... import logging

    sofc_config = config.get("execution", "stop_on_first_crash")
    ruo_config = config.get("execution", "remove_unnecessary_outputs")
    ki_config = config.get("execution", "keep_inputs")
    wf_config = config.get("logging", "workflow_level")
    if_config = config.get("logging", "interface_level")
    ut_config = config.get("logging", "utils_level")

    wf_level = logging.getLogger("nipype.workflow").level
    if_level = logging.getLogger("nipype.interface").level
    ut_level = logging.getLogger("nipype.utils").level

    config.enable_debug_mode()

    # Check config is updated and logging levels, too
    assert config.get("execution", "stop_on_first_crash") == "true"
    assert config.get("execution", "remove_unnecessary_outputs") == "false"
    assert config.get("execution", "keep_inputs") == "true"
    assert config.get("logging", "workflow_level") == "DEBUG"
    assert config.get("logging", "interface_level") == "DEBUG"
    assert config.get("logging", "utils_level") == "DEBUG"

    assert logging.getLogger("nipype.workflow").level == 10
    assert logging.getLogger("nipype.interface").level == 10
    assert logging.getLogger("nipype.utils").level == 10

    # Restore config and levels
    config.set("execution", "stop_on_first_crash", sofc_config)
    config.set("execution", "remove_unnecessary_outputs", ruo_config)
    config.set("execution", "keep_inputs", ki_config)
    config.set("logging", "workflow_level", wf_config)
    config.set("logging", "interface_level", if_config)
    config.set("logging", "utils_level", ut_config)
    logging.update_logging(config)

    assert config.get("execution", "stop_on_first_crash") == sofc_config
    assert config.get("execution", "remove_unnecessary_outputs") == ruo_config
    assert config.get("execution", "keep_inputs") == ki_config
    assert config.get("logging", "workflow_level") == wf_config
    assert config.get("logging", "interface_level") == if_config
    assert config.get("logging", "utils_level") == ut_config

    assert logging.getLogger("nipype.workflow").level == wf_level
    assert logging.getLogger("nipype.interface").level == if_level
    assert logging.getLogger("nipype.utils").level == ut_level
Example #18
def test_debug_mode():
    from ... import logging

    sofc_config = config.get('execution', 'stop_on_first_crash')
    ruo_config = config.get('execution', 'remove_unnecessary_outputs')
    ki_config = config.get('execution', 'keep_inputs')
    wf_config = config.get('logging', 'workflow_level')
    if_config = config.get('logging', 'interface_level')
    ut_config = config.get('logging', 'utils_level')

    wf_level = logging.getLogger('nipype.workflow').level
    if_level = logging.getLogger('nipype.interface').level
    ut_level = logging.getLogger('nipype.utils').level

    config.enable_debug_mode()

    # Check config is updated and logging levels, too
    assert config.get('execution', 'stop_on_first_crash') == 'true'
    assert config.get('execution', 'remove_unnecessary_outputs') == 'false'
    assert config.get('execution', 'keep_inputs') == 'true'
    assert config.get('logging', 'workflow_level') == 'DEBUG'
    assert config.get('logging', 'interface_level') == 'DEBUG'
    assert config.get('logging', 'utils_level') == 'DEBUG'

    assert logging.getLogger('nipype.workflow').level == 10
    assert logging.getLogger('nipype.interface').level == 10
    assert logging.getLogger('nipype.utils').level == 10

    # Restore config and levels
    config.set('execution', 'stop_on_first_crash', sofc_config)
    config.set('execution', 'remove_unnecessary_outputs', ruo_config)
    config.set('execution', 'keep_inputs', ki_config)
    config.set('logging', 'workflow_level', wf_config)
    config.set('logging', 'interface_level', if_config)
    config.set('logging', 'utils_level', ut_config)
    logging.update_logging(config)

    assert config.get('execution', 'stop_on_first_crash') == sofc_config
    assert config.get('execution', 'remove_unnecessary_outputs') == ruo_config
    assert config.get('execution', 'keep_inputs') == ki_config
    assert config.get('logging', 'workflow_level') == wf_config
    assert config.get('logging', 'interface_level') == if_config
    assert config.get('logging', 'utils_level') == ut_config

    assert logging.getLogger('nipype.workflow').level == wf_level
    assert logging.getLogger('nipype.interface').level == if_level
    assert logging.getLogger('nipype.utils').level == ut_level
Example #19
def main(argv=None):
    import os
    import sys

    from nipype import config
    config.enable_debug_mode()

    #-------------------------------- argument parser
    import argparse
    argParser = argparse.ArgumentParser( description="""****************************
        10-cross validation analysis
        """)
    # workup arguments
    argWfGrp = argParser.add_argument_group( 'argWfGrp', """****************************
        auto workflow arguments for cross validation
        """)
    argWfGrp.add_argument('--experimentalConfigurationFile',
                          help="""experimentalConfigurationFile
        Configuration file name with FULL PATH""",
                          dest='experimentalConfigurationFile', required=True)
    argWfGrp.add_argument( '--expDir',    help="""expDir
        """,
                           dest='expDir', required=False, default=".")
    argWfGrp.add_argument( '--baseDir',    help="""baseDir
        """,
                           dest='baseDir', required=False, default=".")
    argWfGrp.add_argument( '--runOption',    help="""runOption [local/cluster]
        """,
                           dest='runOption', required=False, default="local")
    argWfGrp.add_argument( '--PythonBinDir',    help="""PythonBinDir [local/cluster]
        """,
                           dest='PythonBinDir', required=False, default="NA")
    argWfGrp.add_argument( '--BRAINSToolsSrcDir',    help="""BRAINSToolsSrcDir [local/cluster]
        """,
                           dest='BRAINSToolsSrcDir', required=False, default="NA")
    argWfGrp.add_argument( '--BRAINSToolsBuildDir',    help="""BRAINSToolsBuildDir [local/cluster]
        """,
                           dest='BRAINSToolsBuildDir', required=False, default="NA")

    args = argParser.parse_args()
    similarityComputeWorkflow(args.expDir,
                              args.baseDir,
                              args.experimentalConfigurationFile,
                              args.runOption,
                              args.PythonBinDir,
                              args.BRAINSToolsSrcDir,
                              args.BRAINSToolsBuildDir)
Example #20
def main(argv=None):
    import os
    import sys

    from nipype import config
    config.enable_debug_mode()

    #-------------------------------- argument parser
    import argparse
    argParser = argparse.ArgumentParser( description="""****************************
        10-cross validation analysis
        """)
    # workup arguments
    argWfGrp = argParser.add_argument_group( 'argWfGrp', """****************************
        auto workflow arguments for cross validation
        """)
    argWfGrp.add_argument('--experimentalConfigurationFile',
                          help="""experimentalConfigurationFile
        Configuration file name with FULL PATH""",
                          dest='experimentalConfigurationFile', required=True)
    argWfGrp.add_argument( '--expDir',    help="""expDir
        """,
                           dest='expDir', required=False, default=".")
    argWfGrp.add_argument( '--baseDir',    help="""baseDir
        """,
                           dest='baseDir', required=False, default=".")
    argWfGrp.add_argument( '--runOption',    help="""runOption [local/cluster]
        """,
                           dest='runOption', required=False, default="local")
    argWfGrp.add_argument( '--PythonBinDir',    help="""PythonBinDir [local/cluster]
        """,
                           dest='PythonBinDir', required=False, default="NA")
    argWfGrp.add_argument( '--BRAINSToolsSrcDir',    help="""BRAINSToolsSrcDir [local/cluster]
        """,
                           dest='BRAINSToolsSrcDir', required=False, default="NA")
    argWfGrp.add_argument( '--BRAINSToolsBuildDir',    help="""BRAINSToolsBuildDir [local/cluster]
        """,
                           dest='BRAINSToolsBuildDir', required=False, default="NA")

    args = argParser.parse_args()
    similarityComputeWorkflow(args.expDir,
                              args.baseDir,
                              args.experimentalConfigurationFile,
                              args.runOption,
                              args.PythonBinDir,
                              args.BRAINSToolsSrcDir,
                              args.BRAINSToolsBuildDir)
Example #21
def test_debug_mode():
    from ... import logging

    sofc_config = config.get('execution', 'stop_on_first_crash')
    ruo_config = config.get('execution', 'remove_unnecessary_outputs')
    ki_config = config.get('execution', 'keep_inputs')
    wf_config = config.get('logging', 'workflow_level')
    if_config = config.get('logging', 'interface_level')
    ut_config = config.get('logging', 'utils_level')

    wf_level = logging.getLogger('nipype.workflow').level
    if_level = logging.getLogger('nipype.interface').level
    ut_level = logging.getLogger('nipype.utils').level

    config.enable_debug_mode()

    # Check config is updated and logging levels, too
    assert config.get('execution', 'stop_on_first_crash') == 'true'
    assert config.get('execution', 'remove_unnecessary_outputs') == 'false'
    assert config.get('execution', 'keep_inputs') == 'true'
    assert config.get('logging', 'workflow_level') == 'DEBUG'
    assert config.get('logging', 'interface_level') == 'DEBUG'
    assert config.get('logging', 'utils_level') == 'DEBUG'

    assert logging.getLogger('nipype.workflow').level == 10
    assert logging.getLogger('nipype.interface').level == 10
    assert logging.getLogger('nipype.utils').level == 10

    # Restore config and levels
    config.set('execution', 'stop_on_first_crash', sofc_config)
    config.set('execution', 'remove_unnecessary_outputs', ruo_config)
    config.set('execution', 'keep_inputs', ki_config)
    config.set('logging', 'workflow_level', wf_config)
    config.set('logging', 'interface_level', if_config)
    config.set('logging', 'utils_level', ut_config)
    logging.update_logging(config)

    assert config.get('execution', 'stop_on_first_crash') == sofc_config
    assert config.get('execution', 'remove_unnecessary_outputs') == ruo_config
    assert config.get('execution', 'keep_inputs') == ki_config
    assert config.get('logging', 'workflow_level') == wf_config
    assert config.get('logging', 'interface_level') == if_config
    assert config.get('logging', 'utils_level') == ut_config

    assert logging.getLogger('nipype.workflow').level == wf_level
    assert logging.getLogger('nipype.interface').level == if_level
    assert logging.getLogger('nipype.utils').level == ut_level
Example #22
def run_examples(example, pipelines, plugin):
    print 'running example: %s with plugin: %s'%(example, plugin)
    from nipype import config
    config.enable_debug_mode()
    from nipype.interfaces.base import CommandLine
    CommandLine.set_default_terminal_output("stream")

    __import__(example)
    for pipeline in pipelines:
        wf = getattr(sys.modules[example], pipeline)
        wf.base_dir = os.path.join(os.getcwd(), 'output', example, plugin)
        if os.path.exists(wf.base_dir):
            rmtree(wf.base_dir)
        wf.config = {'execution' :{'hash_method': 'timestamp', 'stop_on_first_rerun': 'true'}}
        wf.run(plugin=plugin, plugin_args={'n_procs': 4})
        #run twice to check if nothing is rerunning
        wf.run(plugin=plugin)
Example #23
def run_examples(example, pipelines, plugin):
    print('running example: %s with plugin: %s' % (example, plugin))
    from nipype import config
    config.enable_debug_mode()
    from nipype.interfaces.base import CommandLine
    CommandLine.set_default_terminal_output("stream")

    __import__(example)
    for pipeline in pipelines:
        wf = getattr(sys.modules[example], pipeline)
        wf.base_dir = os.path.join(os.getcwd(), 'output', example, plugin)
        if os.path.exists(wf.base_dir):
            rmtree(wf.base_dir)
        wf.config = {'execution': {'hash_method': 'timestamp', 'stop_on_first_rerun': 'true'}}
        wf.run(plugin=plugin, plugin_args={'n_procs': 4})
        # run twice to check if nothing is rerunning
        wf.run(plugin=plugin)
def brightthresh(func):
    import nibabel as nib
    from numpy import median, where

    from nipype import config, logging
    config.enable_debug_mode()
    logging.update_logging(config)

    func_nifti1 = nib.load(func)
    func_data = func_nifti1.get_data()
    func_data = func_data.astype(float)

    brain_values = where(func_data > 0)
    median_thresh = median(brain_values)
    bright_thresh = 0.75 * median_thresh

    return(bright_thresh)
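The 0.75 x median value computed above matches the convention commonly used for FSL SUSAN's brightness threshold. A minimal sketch, assuming hypothetical paths and FWHM, of passing it to nipype's SUSAN interface:

from nipype.interfaces.fsl import SUSAN

bt = brightthresh('/path/to/func.nii.gz')  # hypothetical input path
smooth = SUSAN()
smooth.inputs.in_file = '/path/to/func.nii.gz'
smooth.inputs.brightness_threshold = bt
smooth.inputs.fwhm = 6.0  # hypothetical smoothing kernel
# smooth.run()  # requires FSL to be installed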
Example #25
def prep_logging(opts, output_folder):
    cli_file = f'{output_folder}/rabies_{opts.rabies_stage}.pkl'
    if os.path.isfile(cli_file):
        raise ValueError(f"""
            A previous run was indicated by the presence of {cli_file}.
            This can lead to inconsistencies between previous outputs and the log files.
            To prevent this, you are required to manually remove {cli_file}, and we 
            recommend also removing previous datasinks from the {opts.rabies_stage} RABIES step.
            """)

    with open(cli_file, 'wb') as handle:
        pickle.dump(opts, handle, protocol=pickle.HIGHEST_PROTOCOL)

    # remove old versions of the log if already existing
    log_path = f'{output_folder}/rabies_{opts.rabies_stage}.log'
    if os.path.isfile(log_path):
        os.remove(log_path)

    config.update_config({'logging': {'log_directory': output_folder,
                                    'log_to_file': True}})

    # setting workflow logging level
    if opts.verbose==0:
        level="WARNING"
    elif opts.verbose==1:
        level="INFO"
    elif opts.verbose<=2:
        level="DEBUG"
        config.enable_debug_mode()
    else:
        raise ValueError(f"--verbose must be provided an integer of 0 or above. {opts.verbose} was provided instead.")

    # nipype has hard-coded 'nipype.log' filename; we rename it after it is created, and change the handlers
    logging.update_logging(config)
    os.rename(f'{output_folder}/pypeline.log', log_path)
    # change the handlers path to the desired file
    for logger in logging.loggers.keys():
        log = logging.getLogger(logger)
        handler = log.handlers[0]
        handler.baseFilename = log_path

    # set the defined level of verbose
    log = logging.getLogger('nipype.workflow')
    log.setLevel(level)
    log.debug('Debug ON')
    return log
def combine_par(par_list):
    from nipype import config, logging
    config.enable_debug_mode()
    logging.update_logging(config)
    from os.path import abspath
    from numpy import vstack, savetxt, genfromtxt

    motion = genfromtxt(par_list[0], dtype=float)
    if len(par_list)>1:
        for file in par_list[1:]:
            # read each additional run (the original re-read the first file here)
            temp = genfromtxt(file, dtype=float)
            motion = vstack((motion, temp))

    filename = 'motion.par'
    savetxt(filename, motion, delimiter=' ')
    combined_par = abspath(filename)
    return(combined_par)
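A minimal direct-call sketch for the concatenation helper above (the run-wise .par paths are hypothetical); the combined parameters are written as motion.par in the current working directory:

runs = ['/data/run1/mcflirt.par', '/data/run2/mcflirt.par']  # hypothetical paths
combined_par = combine_par(runs)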
Example #27
def brightthresh(func):
    import nibabel as nib
    from numpy import median, where

    from nipype import config, logging
    config.enable_debug_mode()
    logging.update_logging(config)

    func_nifti1 = nib.load(func)
    func_data = func_nifti1.get_data()
    func_data = func_data.astype(float)

    brain_values = where(func_data > 0)
    median_thresh = median(brain_values)
    bright_thresh = 0.75 * median_thresh

    return (bright_thresh)
def create_coreg_plot(epi,anat):
    import os
    from nipype import config, logging
    config.enable_debug_mode()
    logging.update_logging(config)
    from nilearn import plotting

    coreg_filename='coregistration.png'
    display = plotting.plot_anat(epi, display_mode='ortho',
                                 draw_cross=False,
                                 title = 'coregistration to anatomy')
    display.add_edges(anat)
    display.savefig(coreg_filename)
    display.close()
    coreg_file = os.path.abspath(coreg_filename)

    return(coreg_file)
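A minimal direct-call sketch for the QC helper above (both paths are hypothetical); the overlay PNG (coregistration.png) is written to the current working directory:

coreg_png = create_coreg_plot('/path/to/mean_func.nii.gz',   # hypothetical EPI
                              '/path/to/T1w_brain.nii.gz')   # hypothetical anatomical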
Example #29
def run_examples(example, pipelines, data_path, plugin=None, rm_base_dir=True):
    from nipype import config
    from nipype.interfaces.base import CommandLine

    if plugin is None:
        plugin = "MultiProc"

    print("running example: %s with plugin: %s" % (example, plugin))
    config.enable_debug_mode()
    config.enable_provenance()
    CommandLine.set_default_terminal_output("stream")

    plugin_args = {}
    if plugin == "MultiProc":
        plugin_args["n_procs"] = int(os.getenv("NIPYPE_NUMBER_OF_CPUS", cpu_count()))

    __import__(example)
    for pipeline in pipelines:
        wf = getattr(sys.modules[example], pipeline)
        wf.base_dir = os.path.join(os.getcwd(), "output", example, plugin)

        results_dir = os.path.join(wf.base_dir, wf.name)
        if rm_base_dir and os.path.exists(results_dir):
            rmtree(results_dir)

        # Handle a logging directory
        log_dir = os.path.join(os.getcwd(), "logs", example)
        if not os.path.exists(log_dir):
            os.makedirs(log_dir)
        wf.config = {
            "execution": {
                "hash_method": "timestamp",
                "stop_on_first_rerun": "true",
                "write_provenance": "true",
                "poll_sleep_duration": 2,
            },
            "logging": {"log_directory": log_dir, "log_to_file": True},
        }
        try:
            wf.inputs.inputnode.in_data = os.path.abspath(data_path)
        except AttributeError:
            pass  # the workflow does not have inputnode.in_data

        wf.run(plugin=plugin, plugin_args=plugin_args)
        # run twice to check if nothing is rerunning
        wf.run(plugin=plugin)
Example #30
def create_coreg_plot(epi, anat):
    import os
    from nipype import config, logging
    config.enable_debug_mode()
    logging.update_logging(config)
    from nilearn import plotting

    coreg_filename = 'coregistration.png'
    display = plotting.plot_anat(epi,
                                 display_mode='ortho',
                                 draw_cross=False,
                                 title='coregistration to anatomy')
    display.add_edges(anat)
    display.savefig(coreg_filename)
    display.close()
    coreg_file = os.path.abspath(coreg_filename)

    return (coreg_file)
Example #31
def check_mask_coverage(epi, brainmask):
    import os
    from nipype import config, logging
    config.enable_debug_mode()
    logging.update_logging(config)
    from nilearn import plotting

    maskcheck_filename = 'maskcheck.png'
    display = plotting.plot_anat(epi,
                                 display_mode='ortho',
                                 draw_cross=False,
                                 title='brainmask coverage')
    display.add_contours(brainmask, levels=[.5], colors='r')
    display.savefig(maskcheck_filename)
    display.close()
    maskcheck_file = os.path.abspath(maskcheck_filename)

    return (maskcheck_file)
Example #32
def main(argv=None):
    import os
    import sys

    from nipype import config
    config.enable_debug_mode()
    #-------------------------------- argument parser
    import argparse
    argParser = argparse.ArgumentParser( description="""****************************
        similarity computation between two labels 
        """)
    # workup arguments
    argParser.add_argument('--labelMapFilename1',
                          help="""a filename that will be compared to. """,
                          dest='labelMapFilename1', required=False)

    argParser.add_argument('--labelMapFilename2',
                          help="""a filename that will be compared to. """,
                          dest='labelMapFilename2', required=False)

    argParser.add_argument('--outputCSVFilename',
                          help="""a filename that will store comparative results to. """,
                          dest='outputCSVFilename', required=False)

    argParser.add_argument('--doUnitTest', action='store_true',
                          help="""Do unit test if given""",
                          dest='doUnitTest', required=False)
    args = argParser.parse_args()

    action=False
    if args.doUnitTest :
        unitTest()
        action=True
    if args.labelMapFilename1 or args.labelMapFilename2:
        print os.path.abspath( args.labelMapFilename1 )
        print os.path.abspath( args.labelMapFilename2 )
        print os.path.abspath( args.outputCSVFilename )
        computeSimilarity( os.path.abspath( args.labelMapFilename1 ), 
                           os.path.abspath( args.labelMapFilename2 ),
                           os.path.abspath( args.outputCSVFilename ) )
        action=True
    if not action:
        print """        ***
def combine_fd(fd_list):
    from nipype import config, logging
    config.enable_debug_mode()
    logging.update_logging(config)
    from os.path import abspath
    from numpy import asarray, savetxt

    motion = open(fd_list[0]).read().splitlines()

    if len(fd_list)>1:
        for file in fd_list[1:]:
            temp = open(file).read().splitlines()
            motion = motion+temp

    motion = asarray(motion).astype(float)
    filename = 'FD_full.txt'
    savetxt(filename,motion)
    combined_fd = abspath(filename)
    return(combined_fd)
Example #34
def setup(argv):
    print "Configuring environment..."
    import os, os.path
    from utilities.configFileParser import resolveDataSinkOption, parseFile
    from utilities.pathHandling import validatePath
    from utilities import misc
    environment, experiment, pipeline, cluster = parseFile(argv["--ExperimentConfig"], argv["--pe"])
    pipeline['ds_overwrite'] = resolveDataSinkOption(argv, pipeline)

    if cluster is None:
        assert argv["--wf_template_runner"] in misc.WFRUN, \
          "wf_template_runner options for clusters can only be given when the configuration file's CLUSTER option == True"
        os.environ['NSLOTS'] = str(misc.get_cpus(argv["--wf_template_runner"]))
    else:
        load_modules(cluster['modules'])  # Load modules if not already done  ## MODS PATH
        # print os.environ['LOADEDMODULES']
    if environment['virtualenv_dir']:  ## MODS PATH
        activate_this = validatePath(os.path.join(environment['virtualenv_dir'], 'bin', 'activate_this.py'), False, False)
        execfile(activate_this, dict(__file__=activate_this))
    utilities_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'utilities')
    configure_env = validatePath(os.path.join(utilities_path, 'configure_env.py'), False, False)
    # Add the AutoWorkup directory to the PYTHONPATH every time - REQUIRED FOR CLUSTER DISPATCHING
    environment['env']['PYTHONPATH'] =  environment['env']['PYTHONPATH'] + ":" + os.path.dirname(__file__)
    execfile(configure_env, dict(__file__=__file__,
                                 append_os_path=environment['env']['PATH'],
                                 append_sys_path=environment['env']['PYTHONPATH'])
        )  ## MODS PATH
    from nipype import config
    config.enable_debug_mode()
    from utilities.package_check import verify_packages
    verify_packages()
    if 'FREESURFER' in experiment['components']:  ## FREESURFER MODS
        configure_FS = validatePath(os.path.join(utilities_path, 'utilities', 'configure_FS.py'), False, False)
        execfile(configure_FS, dict(FS_VARS=misc.FS_VARS, env=environment['env']))
        print "FREESURFER needs to check for sane environment here!"  # TODO: raise warning, write method, what???
    for key, value in environment['env'].items():
        if key in ['PATH', 'PYTHONPATH'] + misc.FS_VARS:
            pass
        else:
            os.environ[key] = value  # Do not use os.putenv (see Python documentation)
    return environment, experiment, pipeline, cluster
Example #35
def bandpass_filter(in_file, lowpass, highpass, TR):
    import numpy as np
    import nibabel as nb
    from os import path
    from nipype.interfaces.afni.preprocess import Bandpass
    from nipype import config, logging
    config.enable_debug_mode()
    logging.update_logging(config)

    out_file = 'func_filtered.nii'
    bp = Bandpass()
    bp.inputs.highpass = highpass
    bp.inputs.lowpass = lowpass
    bp.inputs.in_file = in_file
    bp.inputs.tr = TR
    bp.inputs.out_file = out_file
    bp.inputs.outputtype = 'NIFTI'
    bp.run()

    out_file = path.abspath(out_file)
    return (out_file)
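A hedged direct-call sketch for the filter above (input path, cutoff frequencies, and TR are hypothetical); the Bandpass interface wraps AFNI's 3dBandpass, so AFNI must be installed:

filtered = bandpass_filter('/path/to/func_preproc.nii.gz',
                           lowpass=0.1, highpass=0.01, TR=2.0)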
Example #36
def run_examples(example, pipelines, data_path, plugin=None):
    from nipype import config
    from nipype.interfaces.base import CommandLine

    if plugin is None:
        plugin = 'MultiProc'

    print('running example: %s with plugin: %s' % (example, plugin))
    config.enable_debug_mode()
    config.enable_provenance()
    CommandLine.set_default_terminal_output("stream")

    plugin_args = {}
    if plugin == 'MultiProc':
        plugin_args['n_procs'] = cpu_count()

    __import__(example)
    for pipeline in pipelines:
        wf = getattr(sys.modules[example], pipeline)
        wf.base_dir = os.path.join(os.getcwd(), 'output', example, plugin)
        if os.path.exists(wf.base_dir):
            rmtree(wf.base_dir)

        # Handle a logging directory
        log_dir = os.path.join(os.getcwd(), 'logs', example)
        if os.path.exists(log_dir):
            rmtree(log_dir)
        os.makedirs(log_dir)
        wf.config = {'execution': {'hash_method': 'timestamp',
                                   'stop_on_first_rerun': 'true',
                                   'write_provenance': 'true'},
                     'logging': {'log_directory': log_dir, 'log_to_file': True}}
        try:
            wf.inputs.inputnode.in_data = os.path.abspath(data_path)
        except AttributeError:
            pass # the workflow does not have inputnode.in_data

        wf.run(plugin=plugin, plugin_args=plugin_args)
        # run twice to check if nothing is rerunning
        wf.run(plugin=plugin)
def summarize_motion(motion_df_file, motion_file, vols_to_censor, TR):
    from nipype import config, logging
    config.enable_debug_mode()
    logging.update_logging(config)
    from os.path import dirname, basename
    from numpy import asarray, mean, insert, zeros, sort
    from pandas import DataFrame, Series, read_csv

    motion_df = read_csv(motion_df_file, index_col=0)

    motion = asarray(open(motion_file).read().splitlines()).astype(float)
    censvols = asarray(open(vols_to_censor).read().splitlines()).astype(int)
    sec_not_censored = (len(motion)-len(censvols))*TR

    if censvols[0]>0:
        periods_not_censored = insert(censvols,0,0)
    else:
        periods_not_censored = censvols

    if periods_not_censored[-1]<len(motion):
        periods_not_censored = insert(periods_not_censored,len(periods_not_censored),len(motion))

    lengths = zeros(len(periods_not_censored)-1)
    for a in range(0,len(lengths)):
        lengths[a] = periods_not_censored[a+1] - periods_not_censored[a] - 1

    lengths = lengths*TR

    # sort lengths in descending order
    lengths = sort(lengths)[::-1]

    fp = dirname(motion_file)
    subject = basename(fp)

    motion_df.loc[subject] = [mean(motion),max(motion),len(censvols),len(motion),sec_not_censored,lengths]
    motion_df.to_csv(motion_df_file)

    return()
Example #38
def MasterProcessingController(argv=None):
    import argparse
    import configparser
    import csv
    import string

    if argv == None:
        argv = sys.argv

    # Create and parse input arguments
    parser = argparse.ArgumentParser(description='Runs a mini version of BRAINSAutoWorkup')
    group = parser.add_argument_group('Required')
    group.add_argument('-pe', action="store", dest='processingEnvironment', required=True,
                       help='The name of the processing environment to use from the config file')
    group.add_argument('-wfrun', action="store", dest='wfrun', required=True,
                       help='The name of the workflow running plugin to use')
    group.add_argument('-subject', action="store", dest='subject', required=True,
                       help='The name of the subject to process')
    group.add_argument('-ExperimentConfig', action="store", dest='ExperimentConfig', required=True,
                       help='The path to the file that describes the entire experiment')
    parser.add_argument('-rewrite_datasinks', action='store_true', default=False,
                        help='Use if the datasinks should be forced rerun.\nDefault: value in configuration file')
    parser.add_argument('--version', action='version', version='%(prog)s 1.0')
    args = parser.parse_args()

    config = configparser.ConfigParser(allow_no_value=True)
    config.read(args.ExperimentConfig)

    # Pipeline-specific information
    GLOBAL_DATA_SINK_REWRITE = setDataSinkRewriteValue(args.rewrite_datasinks,
                                                       config.getboolean('NIPYPE', 'GLOBAL_DATA_SINK_REWRITE'))
    experiment = get_experiment_settings(config)
    # Platform specific information
    environment = get_environment_settings(config)
    if environment['cluster']:
        cluster = get_cluster_settings(config)
    sys.path = environment['PYTHONPATH']
    os.environ['PATH'] = ':'.join(environment['PATH'])
    # Virtualenv
    if not environment['virtualenv_dir'] is None:
        print("Loading virtualenv_dir...")
        execfile(environment['virtualenv_dir'], dict(__file__=environment['virtualenv_dir']))
    ###### Now ensure that all the required packages can be read in from this custom path
    # \/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/
    # print sys.path
    ## Check to ensure that SimpleITK can be found
    import SimpleITK as sitk
    from nipype import config  # NOTE:  This needs to occur AFTER the PYTHON_AUX_PATHS has been modified
    config.enable_debug_mode()  # NOTE:  This needs to occur AFTER the PYTHON_AUX_PATHS has been modified
    # config.enable_provenance()

    ##############################################################################
    from nipype.interfaces.base import CommandLine, CommandLineInputSpec, TraitedSpec, File, Directory
    from nipype.interfaces.base import traits, isdefined, BaseInterface
    from nipype.interfaces.utility import Merge, Split, Function, Rename, IdentityInterface
    import nipype.interfaces.io as nio  # Data i/o
    import nipype.pipeline.engine as pe  # pypeline engine
    from nipype.interfaces.freesurfer import ReconAll

    from nipype.utils.misc import package_check
    # package_check('nipype', '5.4', 'tutorial1') ## HACK: Check nipype version
    package_check('numpy', '1.3', 'tutorial1')
    package_check('scipy', '0.7', 'tutorial1')
    package_check('networkx', '1.0', 'tutorial1')
    package_check('IPython', '0.10', 'tutorial1')

    try:
        verify_empty_freesurfer_env()
    except EnvironmentError:
        raise

    # Define platform specific output write paths
    if not os.path.exists(experiment['output_cache']):
        os.makedirs(experiment['output_cache'])
    if not os.path.exists(experiment['output_results']):
        os.makedirs(experiment['output_results'])
    if 'input_results' in list(experiment.keys()):
        assert os.path.exists(
            experiment['input_results']), "The previous experiment directory does not exist: {0}".format(
            experiment['input_results'])

    # \/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/
    #    Define platform specific output write paths
    mountPrefix = expConfig.get(input_arguments.processingEnvironment, 'MOUNTPREFIX')
    BASEOUTPUTDIR = expConfig.get(input_arguments.processingEnvironment, 'BASEOUTPUTDIR')
    ExperimentBaseDirectoryPrefix = os.path.realpath(os.path.join(BASEOUTPUTDIR, ExperimentName))
    ExperimentBaseDirectoryCache = ExperimentBaseDirectoryPrefix + "_CACHE"
    ExperimentBaseDirectoryResults = ExperimentBaseDirectoryPrefix + "_Results"
    if not os.path.exists(ExperimentBaseDirectoryCache):
        os.makedirs(ExperimentBaseDirectoryCache)
    if not os.path.exists(ExperimentBaseDirectoryResults):
        os.makedirs(ExperimentBaseDirectoryResults)
    if not PreviousExperimentName is None:
        PreviousBaseDirectoryPrefix = os.path.realpath(os.path.join(BASEOUTPUTDIR, PreviousExperimentName))
        PreviousBaseDirectoryResults = PreviousBaseDirectoryPrefix + "_Results"
        assert os.path.exists(
            PreviousBaseDirectoryResults), "The previous experiment directory does not exist: {0}".format(
            PreviousBaseDirectoryResults)
    else:
        PreviousBaseDirectoryResults = None
    # Define workup common reference data sets
    #    The ATLAS needs to be copied to the ExperimentBaseDirectoryPrefix
    #    The ATLAS pathing must stay constant
    ATLASPATH = expConfig.get(input_arguments.processingEnvironment, 'ATLASPATH')
    if not os.path.exists(ATLASPATH):
        print("ERROR:  Invalid Path for Atlas: {0}".format(ATLASPATH))
        sys.exit(-1)
    CACHE_ATLASPATH = os.path.realpath(os.path.join(ExperimentBaseDirectoryCache, 'Atlas'))
    from distutils.dir_util import copy_tree
    if not os.path.exists(CACHE_ATLASPATH):
        print("Copying a reference of the atlas to the experiment cache directory:\n    from: {0}\n    to: {1}".format(
            ATLASPATH, CACHE_ATLASPATH))
        copy_tree(ATLASPATH, CACHE_ATLASPATH, preserve_mode=1, preserve_times=1)
        ## Now generate the xml file with the correct pathing
        file_replace(os.path.join(ATLASPATH, 'ExtendedAtlasDefinition.xml.in'),
                     os.path.join(CACHE_ATLASPATH, 'ExtendedAtlasDefinition.xml'), "@ATLAS_INSTALL_DIRECTORY@",
                     CACHE_ATLASPATH)
    else:
        print("Atlas already exists in experiment cache directory: {0}".format(CACHE_ATLASPATH))

    ## Set custom environmental variables so that subproceses work properly (i.e. for FreeSurfer)
    CUSTOM_ENVIRONMENT = eval(environment['misc'])
    # print CUSTOM_ENVIRONMENT
    for key, value in list(CUSTOM_ENVIRONMENT.items()):
        # print "SETTING: ", key, value
        os.putenv(key, value)
        os.environ[key] = value
    # print os.environ
    # sys.exit(-1)

    WORKFLOW_COMPONENTS = experiment['components']
    if 'FREESURFER' in WORKFLOW_COMPONENTS:
        check_freesurfer_environment()

    cluster = setup_cpu(args.wfrun, config)  # None unless wfrun is 'helium*' or 'ipl_OSX', then dict()

    print("Configuring Pipeline")
    ## Ensure that entire db is built and cached before parallel section starts.
    _ignoreme = OpenSubjectDatabase(experiment['output_cache'], ["all"], environment['prefix'],
                                    environment['subject_data_file'])
    to_do_subjects = args.subject.split(',')
    if to_do_subjects[0] == "all":
        to_do_subjects = _ignoreme.getAllSubjects()
    _ignoreme = None

    ## Create the shell wrapper script for ensuring that all jobs running on remote hosts from SGE
    #  have the same environment as the job submission host.

    JOB_SCRIPT = get_global_sge_script(sys.path, os.environ['PATH'], CUSTOM_ENVIRONMENT, MODULES)
    print(JOB_SCRIPT)

    # Randomly shuffle to_do_subjects to get max
    import random
    random.shuffle(to_do_subjects)

    ## Make a list of all the arguments to be processed
    sp_args_list = list()
    start_time = time.time()
    subj_index = 1
    for subjectid in to_do_subjects:
        delay = 2.5 * subj_index
        subj_index += 1
        print("START DELAY: {0}".format(delay))
        sp_args = (CACHE_ATLASPATH, CLUSTER_QUEUE, CLUSTER_QUEUE_LONG, QSTAT_IMMEDIATE_EXE, QSTAT_CACHED_EXE,
                   experiment['output_cache'], experiment['output_results'], environment['subject_data_file'],
                   GLOBAL_DATA_SINK_REWRITE, JOB_SCRIPT, WORKFLOW_COMPONENTS, args,
                   mountPrefix, start_time + delay, subjectid, PreviousBaseDirectoryResults)
        sp_args_list.append(sp_args)
    if 'local' in args.wfrun:
        print("RUNNING WITHOUT POOL BUILDING")
        for sp_args in sp_args_list:
            DoSingleSubjectProcessing(sp_args)
    else:
        ## Make a pool of workers to submit simultaneously
        from multiprocessing import Pool
        myPool = Pool(processes=64, maxtasksperchild=1)
        all_results = myPool.map_async(DoSingleSubjectProcessing, sp_args_list).get(1e100)

        for indx in range(0, len(sp_args_list)):
            if all_results[indx] == False:
                print("FAILED for {0}".format(sp_args_list[indx][-1]))

    print("THIS RUN OF BAW FOR SUBJS {0} HAS COMPLETED".format(to_do_subjects))
    return 0
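
The helper file_replace used above to instantiate ExtendedAtlasDefinition.xml from its .in template is not shown in this listing; a minimal sketch of such a token-substitution helper, with the name and signature taken from the call site and the body an assumption:

def file_replace(input_fname, output_fname, search_token, replace_token):
    # Read the template, substitute the token, write the concrete file.
    with open(input_fname) as fin:
        contents = fin.read()
    with open(output_fname, 'w') as fout:
        fout.write(contents.replace(search_token, replace_token))
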
Esempio n. 39
0
def main(argv=None):
    import os
    import sys
    import nipype.pipeline.engine as pe
    from nipype.interfaces.utility import Function
    import ConfigurationParser

    from nipype import config
    config.enable_debug_mode()

    workflow = pe.Workflow(name='crossValidation')
    workflow.base_dir = '.'

    #-------------------------------- argument parser
    import argparse
    argParser = argparse.ArgumentParser( description="""****************************
        10-cross validation command line argument parser
        """)
    # workup arguments
    argWfGrp = argParser.add_argument_group( 'argWfGrp', """****************************
        auto workflow arguments for cross validation
        """)
    argWfGrp.add_argument('--crossValidationConfigurationFilename',
                          help="""configurationFilename
        Configuration file name with FULL PATH""",
                          dest='crossValidationConfigurationFilename', required=True)
    argWfGrp.add_argument( '--baseDir',    help="""baseDir
        """,
                           dest='baseDir', required=False, default=".")
    argWfGrp.add_argument( '--runOption',    help="""runOption [local/cluster]
        """,
                           dest='runOption', required=False, default="local")
    argWfGrp.add_argument( '--PythonBinDir',    help="""PythonBinDir (path to the python binary directory, used for cluster runs)
        """,
                           dest='PythonBinDir', required=False, default="NA")
    argWfGrp.add_argument( '--BRAINSToolsSrcDir',    help="""BRAINSToolsSrcDir (path to the BRAINSTools source tree)
        """,
                           dest='BRAINSToolsSrcDir', required=False, default="NA")
    argWfGrp.add_argument( '--BRAINSToolsBuildDir',    help="""BRAINSToolsBuildDir (path to the BRAINSTools build tree)
        """,
                           dest='BRAINSToolsBuildDir', required=False, default="NA")

    # test arguments
    argTestGrp = argParser.add_argument_group( 'argTestGrp', """****************************
        arguments for testing
        """)
    argTestGrp.add_argument('--unitTest', action='store_true',
                            dest='unitTest', help="""****************************
        Run the built-in unit tests instead of the full workflow
        """)
    args = argParser.parse_args()

    #--------------------------------
    if not args.unitTest:
        crossValidationWorkUp(args.crossValidationConfigurationFilename,
                              args.baseDir,
                              args.runOption,
                              args.PythonBinDir,
                              args.BRAINSToolsSrcDir,
                              args.BRAINSToolsBuildDir)

    #--------------------------------
    if args.unitTest:
        testElementPerSubject = [3, 4, 5]
        getStartAndEndIndex(0, testElementPerSubject)
        getStartAndEndIndex(1, testElementPerSubject)
        getStartAndEndIndex(2, testElementPerSubject)

        featureDict = {'GadSG': 'testGadFeatureList.csv',
                       't2': 't2FeatureList.csv'}

        sessionList = ["s1", "s2", "s3", "s4", "s5", "s6", "s7", "s8", "s9", "s10", "s11", "s12"]
        getRandomizedSessionOrder(sessionList)
        myTag = getTags(sessionList,
                        2,
                        testElementPerSubject)
        featureFilenameDict = {'f1': 'f1.csv', 'f2': 'f2.csv'}
        configFilename, mainFilenameDict, featureFilenameDict = generateNewFilenames(3,
                                                                                     list(featureFilenameDict.keys()),
                                                                                     "outputPrefix")
        import ConfigurationParser
        m_configurationMap = ConfigurationParser.ConfigurationSectionMap(args.crossValidationConfigurationFilename)

        listFiles = m_configurationMap['ListFiles']
        mainListFilename = listFiles['subjectListFilename'.lower()]
        sessionDict = readListFileBySessionID(mainListFilename)
        myTag = getTags(list(sessionDict.keys()),
                        2,
                        listFiles['numberOfElementInSubset'.lower()])
        writeListFile(sessionDict,
                      mainFilenameDict,
                      myTag)
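
The unit-test branch above exercises several helpers (getStartAndEndIndex, getRandomizedSessionOrder, getTags, ...) that are defined elsewhere in crossValidation.py. As a hypothetical illustration only, getStartAndEndIndex could derive each fold's test block from the cumulative subset sizes:

def getStartAndEndIndex(foldIndex, testElementPerSubject):
    # Assumed behaviour, not the project's actual implementation: the test
    # block for fold i starts after the preceding subset sizes and spans
    # testElementPerSubject[i] sessions.
    startIndex = sum(testElementPerSubject[:foldIndex])
    endIndex = startIndex + testElementPerSubject[foldIndex]
    return startIndex, endIndex

# With testElementPerSubject = [3, 4, 5] this yields (0, 3), (3, 7) and (7, 12).
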
Esempio n. 40
0
def pipeline(args):
    if args['debug']:
        config.enable_debug_mode()
    config.update_config({'logging': {'log_directory':makeSupportDir(args['name'], "logs")}})
    logging.update_logging(config)

    # CONSTANTS
    sessionID = args['session']
    outputType = args['format'].upper()
    fOutputType = args['freesurfer']
    preprocessOn = args['preprocess']
    maskGM = args['maskgm']
    maskWholeBrain = args['maskwb']
    maskWhiteMatterFromSeeds = args['maskseeds']
    # print args['name']
    t1_experiment = "20141001_PREDICTHD_long_Results"  #"20130729_PREDICT_Results"
    atlasFile = os.path.abspath(os.path.join(os.path.dirname(__file__), "ReferenceAtlas", "template_t1.nii.gz"))
    wholeBrainFile = os.path.abspath(os.path.join(os.path.dirname(__file__), "ReferenceAtlas", "template_brain.nii.gz"))
    atlasLabel = os.path.abspath(os.path.join(os.path.dirname(__file__), "ReferenceAtlas", "template_nac_labels.nii.gz"))
    resampleResolution = (2.0, 2.0, 2.0)
    downsampledfilename = 'downsampled_atlas.nii.gz'

    master = pipe.Workflow(name=args['name'] + "_CACHE")
    master.base_dir = os.path.abspath("/Shared/sinapse/CACHE")

    sessions = pipe.Node(interface=IdentityInterface(fields=['session_id']), name='sessionIDs')
    sessions.iterables = ('session_id', sessionID)
    downsampleAtlas = pipe.Node(interface=Function(function=resampleImage,
                                                   input_names=['inputVolume', 'outputVolume', 'resolution'],
                                                   output_names=['outputVolume']),
                                name="downsampleAtlas")
    downsampleAtlas.inputs.inputVolume = atlasFile
    downsampleAtlas.inputs.outputVolume = downsampledfilename
    downsampleAtlas.inputs.resolution = [int(x) for x in resampleResolution]

    # HACK: Remove node from pipeline until Nipype/AFNI file copy issue is resolved
    # fmri_DataSink = pipe.Node(interface=DataSink(), name="fmri_DataSink")
    # fmri_DataSink.overwrite = REWRITE_DATASINKS
    # Output to: /Shared/paulsen/Experiments/YYYYMMDD_<experiment>_Results/fmri
    # fmri_DataSink.inputs.base_directory = os.path.join(master.base_dir, RESULTS_DIR, 'fmri')
    # fmri_DataSink.inputs.substitutions = [('to_3D_out+orig', 'to3D')]
    # fmri_DataSink.inputs.parameterization = False
    #
    # master.connect([(sessions, fmri_DataSink, [('session_id', 'container')])])
    # END HACK

    registration = registrationWorkflow.workflow(t1_experiment, outputType, name="registration_wkfl")
    master.connect([(sessions, registration, [('session_id', "inputs.session_id")])])

    detrend = afninodes.detrendnode(outputType, 'afni3Ddetrend')
    # define grabber
    site = "*"
    subject = "*"
    if preprocessOn:
        grabber = dataio.iowaGrabber(t1_experiment, site, subject, maskGM, maskWholeBrain)
        master.connect([(sessions, grabber, [('session_id', 'session_id')]),
                         (grabber, registration,     [('t1_File', 'inputs.t1')])])
        # Why isn't preprocessWorkflow.workflow() used instead? It would avoid most of the nuisance connections here...
        preprocessing = preprocessWorkflow.prepWorkflow(skipCount=6, outputType=outputType)
        name = args.pop('name')  # HACK: prevent name conflict with nuisance workflow
        nuisance = nuisanceWorkflow.workflow(outputType=outputType, **args)
        args['name'] = name  # END HACK
        master.connect([(grabber, preprocessing,      [('fmri_dicom_dir', 'to_3D.infolder'),
                                                       ('fmri_dicom_dir', 'formatFMRINode.dicomDirectory')]),
                        (grabber, nuisance,           [('whmFile', 'wm.warpWMtoFMRI.input_image')]),
                        (preprocessing, registration, [('merge.out_file', 'inputs.fmri'),  # 7
                                                       ('automask.out_file', 'tstat.mask_file')]),  # *optional*
                        (registration, nuisance,      [('outputs.fmri_reference', 'csf.warpCSFtoFMRI.reference_image'),  # CSF
                                                       ('outputs.nac2fmri_list', 'csf.warpCSFtoFMRI.transforms'),
                                                       ('outputs.fmri_reference', 'wm.warpWMtoFMRI.reference_image'),    # WM
                                                       ('outputs.t12fmri_list', 'wm.warpWMtoFMRI.transforms')]),
                        ])
        warpCSFtoFMRInode = nuisance.get_node('csf').get_node('warpCSFtoFMRI')
        warpCSFtoFMRInode.inputs.input_image = atlasFile
        if maskGM:
            master.connect([(grabber, nuisance,       [('gryFile', 'gm.warpGMtoFMRI.input_image')]),
                            (registration, nuisance,  [('outputs.fmri_reference', 'gm.warpGMtoFMRI.reference_image'),
                                                       ('outputs.t12fmri_list', 'gm.warpGMtoFMRI.transforms')]),
                            (preprocessing, nuisance, [('calc.out_file', 'gm.afni3DmaskAve_grm.in_file'),
                                                       ('volreg.oned_file', 'afni3Ddeconvolve.stim_file_4')])])
        elif maskWholeBrain:
            master.connect([(registration, nuisance,  [('outputs.fmri_reference', 'wb.warpBraintoFMRI.reference_image'),
                                                       ('outputs.nac2fmri_list', 'wb.warpBraintoFMRI.transforms')]),
                            (preprocessing, nuisance, [('calc.out_file', 'wb.afni3DmaskAve_whole.in_file'),
                                                       ('volreg.oned_file', 'afni3Ddeconvolve.stim_file_4')])])
            warpBraintoFMRInode = nuisance.get_node('wb').get_node('warpBraintoFMRI')
            warpBraintoFMRInode.inputs.input_image= wholeBrainFile
        else:
            master.connect([(preprocessing, nuisance, [('volreg.oned_file', 'afni3Ddeconvolve.stim_file_3')])])

        master.connect([(preprocessing, nuisance, [('calc.out_file', 'wm.afni3DmaskAve_wm.in_file'),
                                                   ('calc.out_file', 'csf.afni3DmaskAve_csf.in_file'),
                                                   ('calc.out_file', 'afni3Ddeconvolve.in_file')]),
                        (nuisance, detrend,       [('afni3Ddeconvolve.out_errts', 'in_file')])])  # 13
    else:
        cleveland_grabber = dataio.clevelandGrabber()
        grabber = dataio.autoworkupGrabber(t1_experiment, site, subject)
        converter = pipe.Node(interface=Copy(), name='converter')  # Convert ANALYZE to AFNI

        master.connect([(sessions, grabber,            [('session_id', 'session_id')]),
                         (grabber, registration,        [('t1_File', 'inputs.t1')]),
                         (sessions, cleveland_grabber,  [('session_id', 'session_id')]),
                         (cleveland_grabber, converter, [('fmriHdr', 'in_file')]),
                         (converter, registration,      [('out_file', 'inputs.fmri')]),
                         (converter, detrend,           [('out_file', 'in_file')]),  # in fMRI_space
                        ])

    t1_wf = registrationWorkflow.t1Workflow()
    babc_wf = registrationWorkflow.babcWorkflow()
    # HACK: No EPI
    # epi_wf = registrationWorkflow.epiWorkflow()
    lb_wf = registrationWorkflow.labelWorkflow()
    seed_wf = registrationWorkflow.seedWorkflow()
    bandpass = afninodes.fouriernode(outputType, 'fourier') # Fourier is the last NIFTI file format in the AFNI pipeline

    master.connect([(detrend, bandpass,       [('out_file', 'in_file')]), # Per Dawei, bandpass after running 3dDetrend
                     (grabber, t1_wf,         [('t1_File', 'warpT1toFMRI.input_image')]),
                     (registration, t1_wf,    [('outputs.fmri_reference', 'warpT1toFMRI.reference_image'),  # T1
                                               ('outputs.t12fmri_list', 'warpT1toFMRI.transforms')]),
                     (grabber, babc_wf,       [('csfFile', 'warpBABCtoFMRI.input_image')]),
                     (registration, babc_wf,  [('outputs.fmri_reference', 'warpBABCtoFMRI.reference_image'),  # Labels
                                               ('outputs.t12fmri_list', 'warpBABCtoFMRI.transforms')]),
                     # HACK: No EPI
                     # (downsampleAtlas, epi_wf, [('outputVolume', 'warpEPItoNAC.reference_image')]),
                     # (registration, epi_wf,    [('outputs.fmri2nac_list', 'warpEPItoNAC.transforms')]),
                     # (bandpass, epi_wf,         [('out_file', 'warpEPItoNAC.input_image')]),
                     # END HACK
                     (downsampleAtlas, lb_wf, [('outputVolume', 'warpLabeltoNAC.reference_image')]),
                     (registration, lb_wf,    [('outputs.fmri2nac_list', 'warpLabeltoNAC.transforms')]),
                     (t1_wf, seed_wf,         [('warpT1toFMRI.output_image', 'warpSeedtoFMRI.reference_image')]),
                     (registration, seed_wf,  [('outputs.nac2fmri_list', 'warpSeedtoFMRI.transforms')]),
                     ])

    renameMasks = pipe.Node(interface=Rename(format_string='%(label)s_mask'), name='renameMasksAtlas')
    renameMasks.inputs.keep_ext = True
    atlas_DataSink = dataio.atlasSink(base_directory=master.base_dir, **args)
    master.connect([(renameMasks, atlas_DataSink,     [('out_file', 'Atlas')]),
                    (downsampleAtlas, atlas_DataSink, [('outputVolume', 'Atlas.@resampled')]),
                    ])

    renameMasks2 = pipe.Node(interface=Rename(format_string='%(session)s_%(label)s_mask'), name='renameMasksFMRI')
    renameMasks2.inputs.keep_ext = True
    master.connect(sessions, 'session_id', renameMasks2, 'session')

    clipSeedWithVentriclesNode = pipe.Node(interface=Function(function=clipSeedWithVentricles,
                                           input_names=['seed', 'label', 'outfile'],
                                           output_names=['clipped_seed_fn']),
                                           name='clipSeedWithVentriclesNode')
    clipSeedWithVentriclesNode.inputs.outfile = "clipped_seed.nii.gz"

    master.connect(seed_wf, 'warpSeedtoFMRI.output_image', clipSeedWithVentriclesNode, 'seed')
    master.connect(babc_wf, 'warpBABCtoFMRI.output_image', clipSeedWithVentriclesNode, 'label')
    if not maskWhiteMatterFromSeeds:
        master.connect(clipSeedWithVentriclesNode, 'clipped_seed_fn', renameMasks2, 'in_file')
    else:
        clipSeedWithWhiteMatterNode = pipe.Node(interface=Function(function=clipSeedWithWhiteMatter,
                                                                   input_names=['seed', 'mask', 'outfile'],
                                                                   output_names=['outfile']),
                                                name='clipSeedWithWhiteMatterNode')
        clipSeedWithWhiteMatterNode.inputs.outfile = 'clipped_wm_seed.nii.gz'
        master.connect(babc_wf, 'warpBABCtoFMRI.output_image', clipSeedWithWhiteMatterNode, 'mask')
        master.connect(clipSeedWithVentriclesNode, 'clipped_seed_fn', clipSeedWithWhiteMatterNode, 'seed')
        master.connect(clipSeedWithWhiteMatterNode, 'outfile', renameMasks2, 'in_file')
    # Labels are iterated over, so we need a separate datasink to avoid overwriting any preprocessing
    # results when the labels are iterated (e.g. To3d output)
    # Write out to: /Shared/sinapse/CACHE/YYYYMMDD_<experiment>_Results/<SESSION>
    fmri_label_DataSink = dataio.fmriSink(master.base_dir, **args)
    master.connect(sessions, 'session_id', fmri_label_DataSink, 'container')
    master.connect(renameMasks2, 'out_file', fmri_label_DataSink, 'masks')
    master.connect(bandpass,'out_file', fmri_label_DataSink, 'masks.@bandpass')

    roiMedian = afninodes.maskavenode('AFNI_1D', 'afni_roiMedian', '-mrange 1 1')
    master.connect(renameMasks2, 'out_file', roiMedian, 'mask')
    master.connect(bandpass, 'out_file', roiMedian, 'in_file')

    correlate = afninodes.fimnode('Correlation', 'afni_correlate')
    master.connect(roiMedian, 'out_file', correlate, 'ideal_file')
    master.connect(bandpass, 'out_file', correlate, 'in_file')

    regionLogCalc = afninodes.logcalcnode(outputType, 'afni_regionLogCalc')
    master.connect(correlate, 'out_file', regionLogCalc, 'in_file_a')

    renameZscore = pipe.Node(interface=Rename(format_string="%(session)s_%(label)s_zscore"), name='renameZscore')
    renameZscore.inputs.keep_ext = True
    master.connect(sessions, 'session_id', renameZscore, 'session')
    master.connect(regionLogCalc, 'out_file', renameZscore, 'in_file')
    master.connect(renameZscore, 'out_file', fmri_label_DataSink, 'zscores')
    master.connect(t1_wf, 'warpT1toFMRI.output_image', fmri_label_DataSink, 'zscores.@t1Underlay')

    # Move z values back into NAC atlas space
    # master.connect(downsampleAtlas, 'outputVolume', lb_wf, 'warpLabeltoNAC.reference_image')
    master.connect(regionLogCalc, 'out_file', lb_wf, 'warpLabeltoNAC.input_image')

    renameZscore2 = pipe.Node(interface=Rename(format_string="%(session)s_%(label)s_result"), name='renameZscore2')
    renameZscore2.inputs.keep_ext = True
    master.connect(sessions, 'session_id', renameZscore2, 'session')
    master.connect(lb_wf, 'warpLabeltoNAC.output_image', renameZscore2, 'in_file')
    master.connect(renameZscore2, 'out_file', atlas_DataSink, 'Atlas.@zscore')
    # Connect seed subworkflow
    seedSubflow = seedWorkflow.workflow(args['seeds'], outputType='NIFTI_GZ', name='seed_wkfl')
    master.connect([(downsampleAtlas, seedSubflow,    [('outputVolume', 'afni3Dcalc_seeds.in_file_a')]),
                     (seedSubflow, renameMasks,        [('afni3Dcalc_seeds.out_file', 'in_file'),
                                                        ('selectLabel.out', 'label')]),
                     (seedSubflow, renameMasks2,       [('selectLabel.out', 'label')]),
                     (seedSubflow, renameZscore,       [('selectLabel.out', 'label')]),
                     (seedSubflow, renameZscore2,      [('selectLabel.out', 'label')]),
                     (seedSubflow, seed_wf,               [('afni3Dcalc_seeds.out_file', 'warpSeedtoFMRI.input_image')])
                    ])
    imageDir = makeSupportDir(args['name'], "images")
    if args['plot']:
        registration.write_graph(dotfilename=os.path.join(imageDir, 'register.dot'), graph2use='orig', format='png',
                                 simple_form=False)
        if preprocessOn:
            preprocessing.write_graph(dotfilename=os.path.join(imageDir, 'preprocess.dot'), graph2use='orig', format='png',
                                      simple_form=False)
            nuisance.write_graph(dotfilename=os.path.join(imageDir, 'nuisance.dot'), graph2use='orig', format='png',
                                 simple_form=False)
        seedSubflow.write_graph(dotfilename=os.path.join(imageDir, 'seed.dot'), graph2use='orig', format='png',
                                 simple_form=False)
        master.write_graph(dotfilename=os.path.join(imageDir, 'master.dot'), graph2use="orig", format='png', simple_form=False)
    elif args['debug']:
        try:
            master.run(updatehash=True)
            # Run restingState on the all threads
            # Setup environment for CPU load balancing of ITK based programs.
            # --------
            # import multiprocessing
            # total_CPUS = 10  # multiprocessing.cpu_count()
            # master.run(plugin='MultiProc', plugin_args={'n_proc': total_CPUS})  #, updatehash=True)
            # --------
            # Run restingState on the local cluster
            # master.run(plugin='SGE', plugin_args={'template': os.path.join(os.getcwd(), 'ENV/bin/activate'),
            #                                        'qsub_args': '-S /bin/bash -cwd'})  #, updatehash=True)
        except:
            pass
        master.name = "master"  # HACK: Bug in Graphviz for nodes beginning with numbers
        master.write_graph(dotfilename=os.path.join(imageDir, 'debug_hier.dot'), graph2use="colored", format='png')
        master.write_graph(dotfilename=os.path.join(imageDir, 'debug_orig.dot'), graph2use="flat", format='png')
    else:
        import multiprocessing
        total_CPUS = multiprocessing.cpu_count()
        master.run(plugin='MultiProc', plugin_args={'n_proc': total_CPUS})  #, updatehash=True)
    return 0
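
The downsampleAtlas node above wraps a resampleImage function defined elsewhere in the project; one plausible SimpleITK-based sketch with the same input and output names (the body is an assumption, and the real implementation may rely on a different resampling tool):

def resampleImage(inputVolume, outputVolume, resolution):
    # Imports live inside the function so nipype can serialize it into a Function node.
    import os
    import SimpleITK as sitk
    image = sitk.ReadImage(inputVolume)
    new_spacing = [float(r) for r in resolution]
    # Keep the physical extent constant while changing the voxel spacing.
    new_size = [int(round(sz * spc / nspc)) for sz, spc, nspc in
                zip(image.GetSize(), image.GetSpacing(), new_spacing)]
    resampled = sitk.Resample(image, new_size, sitk.Transform(), sitk.sitkLinear,
                              image.GetOrigin(), new_spacing, image.GetDirection(),
                              0.0, image.GetPixelID())
    sitk.WriteImage(resampled, outputVolume)
    return os.path.abspath(outputVolume)
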
Esempio n. 41
0
def crossValidationWorkUp(crossValidationConfigurationFilename,
                          baseDir,
                          runOption,
                          PythonBinDir,
                          BRAINSToolsSrcDir,
                          BRAINSToolsBuildDir):
    print("""****************************
          crossValidationWorkUp
          """)
    from collections import OrderedDict  # Need OrderedDict internally to ensure consistent ordering
    from nipype import config
    config.enable_debug_mode()

    import crossValidation as this
    import ConfigurationParser
    myConfigurationMap = ConfigurationParser.ConfigurationSectionMap(
        crossValidationConfigurationFilename)

    import nipype.pipeline.engine as pe
    from nipype.interfaces.utility import Function
    import ast
    print( """ before
           createeachvalidationunitnd
           """)
    createConfigurationFiles = pe.Node(name="createConfigurationFiles",
                                       interface=Function(
                                           input_names=['inputConfigurationFilename',
                                                        'outputConfigurationFilenamePrefix'],
                                           output_names=['outputConfigFilenameDict'],
                                           function=this.createConfigurationFileForCrossValidationUnitTest)
                                       )

    preprocessing = pe.Workflow(name='Preprocessing')
    preprocessing.base_dir = baseDir + "/PreprocessingDir"

    createConfigurationFiles.inputs.inputConfigurationFilename = crossValidationConfigurationFilename
    createConfigurationFiles.inputs.outputConfigurationFilenamePrefix = 'createConfigurationFiles'

    extractConfigurationFileListND = pe.Node(name="extractConfigurationFileListND",
                                             interface=Function(
                                                  input_names=['configurationFiledict'],
                                                  output_names=['configurationFileList'],
                                                  function=this.extractConfigFile)
                                             )
    preprocessing.connect(createConfigurationFiles, 'outputConfigFilenameDict',
                          extractConfigurationFileListND, 'configurationFiledict')

    preprocessing.run()

    #------------------------------------------------------------------------------------
    # Data graber for outputs
    #
    import nipype.interfaces.io as nio
    dg = nio.DataGrabber()
    dg.inputs.base_directory = baseDir + "/PreprocessingDir/Preprocessing/createConfigurationFiles/"
    dg.inputs.template = "*config"
    mainConfigFiles = dg.run()

    print((mainConfigFiles.outputs.outfiles))
    print((mainConfigFiles.outputs.outfiles))
    print((mainConfigFiles.outputs.outfiles))
    print((mainConfigFiles.outputs.outfiles))

    #------------------------------------------------------------------------------------
    workflow = pe.Workflow(name='crossValidationWF')
    workflow.base_dir = baseDir

    #------------------------------------------------------------------------------------
    # Generate Probability Map
    #
    Options = myConfigurationMap['Options']
    roiDict = Options['roiBooleanCreator'.lower()]

    #-------------------------------- probMapFilenameGenerator is dummy node
    # to create proper probability file location for nipype
    #
    print("""************************
          probMapFilenameGenerator
          """)

    probMapFilenameGenerator = pe.Node(name="probMapFilenameGenerator",
                                       interface=Function(
                                           input_names=['roiList'],
                                           output_names=['probabilityMapFilename'],
                                           function=this.getProbabilityMapFilename)
                                       )
    print(roiDict)
    probMapFilenameGenerator.inputs.roiList = list(roiDict.keys())
    print("""************************
          probabilityMapGeneratorND
          """)

    #
    #--------------------------------  start from generate probability
    #
    probabilityMapGeneratorND = pe.Node(name="probabilityMapGeneratorND",
                                        interface=Function(
                                             input_names=['configurationFilename',
                                                          'probabilityMapDict',
                                                          'gaussianSigma',
                                                          'outputXmlFilename'],
                                             output_names=['probabilityMapDict',
                                                           'outputXmlFilename',
                                                           'outputConfigurationFilename'],
                                             function=ConfigurationParser.BRAINSCutGenerateProbabilityMap)
                                        )

    probabilityMapGeneratorND.inputs.outputXmlFilename = 'netConfiguration.xml'

    gaussianSigmaParam = ast.literal_eval(Options['gaussianSigma'.lower()])
    print (gaussianSigmaParam)
    probabilityMapGeneratorND.iterables = ('configurationFilename', mainConfigFiles.outputs.outfiles)
    probabilityMapGeneratorND.inputs.gaussianSigma = gaussianSigmaParam

    workflow.connect(probMapFilenameGenerator, 'probabilityMapFilename',
                     probabilityMapGeneratorND, 'probabilityMapDict')

    #
    #--------------------------------  create vectors for each ROI
    #
    print("""************************
          configFileND
          """)
    configFileND = pe.Node(name="configFileND",
                           interface=Function(
                                input_names=['originalFilename',
                                             'editedFilenamePrefix'],
                                output_names=['editedFilenames'],
                                function=ConfigurationParser.ConfigurationFileEditor)
                           )

    configFileND.inputs.editedFilenamePrefix = 'ROI'
    workflow.connect(probabilityMapGeneratorND, 'outputConfigurationFilename',
                     configFileND, 'originalFilename')

    vectorCreatorND = pe.MapNode(name="vectorCreatorND",
                                 interface=Function(
                                      input_names=['configurationFilename',
                                                   'probabilityMapDict',
                                                   'normalization',
                                                   'outputXmlFilename',
                                                   'outputVectorFilename'],
                                      output_names=['outputVectorFilename',
                                                    'outputVectorHdrFilename',
                                                    'outputNormalization',
                                                    'outputXmlFilename'],
                                      function=ConfigurationParser.BRAINSCutCreateVector),
                                 iterfield=['configurationFilename']
                                 )
    vectorCreatorND.inputs.outputVectorFilename = 'oneROIVectorFile.txt'
    vectorCreatorND.inputs.outputXmlFilename = 'oneROICreateVectorNetConfiguration.xml'
    normalizationOption = Options['normalization'.lower()]
    print(( """Normalization Option: {str}
           """.format( str=normalizationOption ) ))
    vectorCreatorND.iterables = ('normalization', normalizationOption)
    #
    #--------------------------------  workflow connections
    #
    workflow.connect(configFileND, 'editedFilenames',
                     vectorCreatorND, 'configurationFilename')
    workflow.connect(probabilityMapGeneratorND, 'probabilityMapDict',
                     vectorCreatorND, 'probabilityMapDict')

    #
    #--------------------------------  balance and combine each ROI vectors
    #
    print("""************************
          balanceND
          """)
    balanceND = pe.Node(name="balanceND",
                       interface=Function(
                            input_names=['inputVectorFilenames'],
                            output_names=['outputVectorFilenames',
                                          'outputVectorHdrFilenames'],
                            function=ConfigurationParser.BalanceInputVectors)
                       )
    workflow.connect(vectorCreatorND, 'outputVectorFilename',
                     balanceND, 'inputVectorFilenames')

    combineND = pe.Node(name="combineND",
                        interface=Function(
                            input_names=['inputVectorFilenames',
                                         'outputVectorFilename'],
                            output_names=['outputVectorFilename',
                                          'outputVectorHdrFilename'],
                            function=ConfigurationParser.CombineInputVectors)
                        )
    workflow.connect(balanceND, 'outputVectorFilenames',
                     combineND, 'inputVectorFilenames')

    combineND.inputs.outputVectorFilename = 'allCombinedVector.txtANN'
    #
    #--------------------------------  train
    #
    print("""************************
          trainND
          """)
    trainND = pe.Node(name="trainND",
                      interface=Function(
                           input_names=['configurationFilename',
                                        'inputVectorFilename',
                                        'outputModelFilenamePrefix',
                                        'outputXmlFilename',
                                        'methodParameter'],
                           output_names=['outputTrainedModelFilename',
                                         'outputMethodParameter'],
                           function=ConfigurationParser.BRAINSCutTrainModel)
                      )
    # methodParameter = { '--method': 'RandomForest',
    #                    '--numberOfTrees': 60,
    #                    '--randomTreeDepth ': 60 }
    methodFromConfigFile = Options['modelParameter'.lower()]
    trainND.iterables = ('methodParameter', methodFromConfigFile)

    trainND.inputs.outputXmlFilename = 'trainNetConfiguration.xml'
    trainND.inputs.outputModelFilenamePrefix = 'trainModelFile.txt'

    workflow.connect(probabilityMapGeneratorND, 'outputConfigurationFilename',
                     trainND, 'configurationFilename')
    workflow.connect(combineND, 'outputVectorFilename',
                     trainND, 'inputVectorFilename')
    #
    #--------------------------------  apply
    #
    applyND = pe.Node(name="applyND",
                      interface=Function(
                           input_names=['configurationFilename',
                                        'probabilityMapDict',
                                        'normalization',
                                        'inputModelFilename',
                                        'methodParameter',
                                        'outputXmlFilename'
                                        ],
                           output_names=['outputLabelDict'],
                           function=ConfigurationParser.BRAINSCutApplyModel)
                      )
    # methodParameter = { '--method': 'RandomForest',
    #                    '--numberOfTrees': 60,
    #                    '--randomTreeDepth ': 60 }
    applyND.inputs.outputXmlFilename = 'applyConfiguration.xml'
    workflow.connect(probabilityMapGeneratorND, 'outputConfigurationFilename',
                     applyND, 'configurationFilename')
    workflow.connect(vectorCreatorND, 'outputNormalization',
                     applyND, 'normalization')
    workflow.connect(probabilityMapGeneratorND, 'probabilityMapDict',
                     applyND, 'probabilityMapDict')
    workflow.connect(trainND, 'outputTrainedModelFilename',
                     applyND, 'inputModelFilename')
    workflow.connect(trainND, 'outputMethodParameter',
                     applyND, 'methodParameter')

    #####################################################################################
    # Data Sink
    #
    import os
    LabelsDS = pe.Node(nio.DataSink(), name='LabelDS')
    LabelsDS.inputs.base_directory = os.path.join(baseDir, "Result")
    LabelsDS.inputs.regexp_substitutions = [('/_', '/'),
                                            ('configurationFilename.*_Test', 'Test'),
                                            ('_configuration.config/normalization_', '/'),
                                            ('methodParameter_--method', ''),
                                            ('RandomForest', 'RF/'),
                                            ('.--randomTreeDepth', 'TreeDepth'),
                                            ('.--numberOfTrees', '_TreeNumber'),
                                            (r'ANNContinuousPrediction(?P<roi>.+)(?P<session>\d\d\d\d\d).nii.gz', r'\g<session>_\g<roi>_ANNContinuous.nii.gz')
                                            ]
    # ANNContinuousPredictionl_accumben77478

    workflow.connect([(applyND, LabelsDS,
                       [(('outputLabelDict', getDictionaryValues), 'Labels')])])

    #####################################################################################
    # analysis
    #

    #####################################################################################
    # Running
    #
    if runOption == "cluster":
        ############################################
        # Platform specific information
        #     Prepend the python search paths
        pythonPath = BRAINSToolsSrcDir + "/BRAINSCut/BRAINSFeatureCreators/RobustStatisticComputations:" + BRAINSToolsSrcDir + "/AutoWorkup/:" + BRAINSToolsSrcDir + "/AutoWorkup/BRAINSTools/:" + BRAINSToolsBuildDir + "/SimpleITK-build/bin:" + \
            BRAINSToolsBuildDir + "/SimpleITK-build/lib:" + PythonBinDir
        binPath = BRAINSToolsBuildDir + "/bin:" + BRAINSToolsBuildDir + "/lib"

        PYTHON_AUX_PATHS = pythonPath
        PYTHON_AUX_PATHS = PYTHON_AUX_PATHS.split(':')
        PYTHON_AUX_PATHS.extend(sys.path)
        sys.path = PYTHON_AUX_PATHS
        # print sys.path
        import SimpleITK as sitk
        #     Prepend the shell environment search paths
        PROGRAM_PATHS = binPath
        PROGRAM_PATHS = PROGRAM_PATHS.split(':')
        import os
        PROGRAM_PATHS.extend(os.environ['PATH'].split(':'))
        os.environ['PATH'] = ':'.join(PROGRAM_PATHS)

        Cluster_Script = get_global_sge_script(PYTHON_AUX_PATHS,
                                               PROGRAM_PATHS,
                                               {}
                                               )
        workflow.run(plugin='SGE',
                     plugin_args=OrderedDict(template=Cluster_Script,
                                      qsub_args="-S /bin/bash -pe smp 4-8 -o /dev/null "))
    else:
        print("""************************
              run
              """)
        try:
            workflow.write_graph(graph2use='flat')
        except:
            pass
        workflow.run()
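
The DataSink connection above passes applyND's outputLabelDict through getDictionaryValues; a minimal sketch of that helper, assuming it simply flattens the ROI-to-filename dictionary into a list for the 'Labels' input:

def getDictionaryValues(inputDictionary):
    # Collect the label filenames (dictionary values) into a plain list.
    returnList = []
    for value in list(inputDictionary.values()):
        returnList.append(value)
    return returnList
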
Esempio n. 42
0
def MasterProcessingController(argv=None):
    import argparse
    import ConfigParser
    import csv
    import string

    if argv is None:
        argv = sys.argv

    # Create and parse input arguments
    parser = argparse.ArgumentParser(description='Runs a mini version of BRAINSAutoWorkup')
    group = parser.add_argument_group('Required')
    group.add_argument('-pe', action="store", dest='processingEnvironment', required=True,
                       help='The name of the processing environment to use from the config file')
    group.add_argument('-wfrun', action="store", dest='wfrun', required=True,
                       help='The name of the workflow running plugin to use')
    group.add_argument('-subject', action="store", dest='subject', required=True,
                       help='The name of the subject to process')
    group.add_argument('-ExperimentConfig', action="store", dest='ExperimentConfig', required=True,
                       help='The path to the file that describes the entire experiment')
    parser.add_argument('-rewrite_datasinks', action='store_true', default=False,
                        help='Use if the datasinks should be forced rerun.\nDefault: value in configuration file')
    parser.add_argument('--version', action='version', version='%(prog)s 1.0')
    input_arguments = parser.parse_args()

    expConfig = ConfigParser.ConfigParser()
    expConfig.read(input_arguments.ExperimentConfig)

    # Pipeline-specific information
    GLOBAL_DATA_SINK_REWRITE_FROM_CONFIG = expConfig.getboolean('PIPELINE', 'GLOBAL_DATA_SINK_REWRITE')
    GLOBAL_DATA_SINK_REWRITE = setDataSinkRewriteValue(input_arguments.rewrite_datasinks, GLOBAL_DATA_SINK_REWRITE_FROM_CONFIG)

    # Experiment specific information
    subject_data_file = expConfig.get('EXPERIMENT_DATA', 'SESSION_DB')
    ExperimentName = expConfig.get('EXPERIMENT_DATA', 'EXPERIMENTNAME')
    if expConfig.has_option('EXPERIMENT_DATA', 'PREVIOUSEXPERIMENTNAME'):
        PreviousExperimentName = expConfig.get('EXPERIMENT_DATA', 'PREVIOUSEXPERIMENTNAME')
    else:
        PreviousExperimentName = None

    # Platform specific information
    #     Prepend the python search paths
    PYTHON_AUX_PATHS = expConfig.get(input_arguments.processingEnvironment, 'PYTHON_AUX_PATHS')
    PYTHON_AUX_PATHS = PYTHON_AUX_PATHS.split(':')
    PYTHON_AUX_PATHS.extend(sys.path)
    sys.path = PYTHON_AUX_PATHS
    #####################################################################################
    #     Prepend the shell environment search paths
    PROGRAM_PATHS = expConfig.get(input_arguments.processingEnvironment, 'PROGRAM_PATHS')
    PROGRAM_PATHS = PROGRAM_PATHS.split(':')
    PROGRAM_PATHS.extend(os.environ['PATH'].split(':'))
    PROGRAM_PATHS = [os.path.dirname(__file__)] + PROGRAM_PATHS
    print "Adding directory {0} to PATH...".format(os.path.dirname(__file__))
    os.environ['PATH'] = ':'.join(PROGRAM_PATHS)
    ######################################################################################
    # Get virtualenv source file
    if expConfig.has_option(input_arguments.processingEnvironment, 'VIRTUALENV'):
        print "Loading virtualenv..."
        VIRTUALENV = expConfig.get(input_arguments.processingEnvironment, 'VIRTUALENV')
        activate_this = os.path.join(VIRTUALENV, 'bin', 'activate_this.py')
        execfile(activate_this, dict(__file__=activate_this))
    ###### Now ensure that all the required packages can be read in from this custom path
    #\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/
    # print sys.path
    from nipype import config  # NOTE:  This needs to occur AFTER the PYTHON_AUX_PATHS has been modified
    config.enable_debug_mode()  # NOTE:  This needs to occur AFTER the PYTHON_AUX_PATHS has been modified
    ##############################################################################
    from nipype.interfaces.base import CommandLine, CommandLineInputSpec, TraitedSpec, File, Directory
    from nipype.interfaces.base import traits, isdefined, BaseInterface
    from nipype.interfaces.utility import Merge, Split, Function, Rename, IdentityInterface
    import nipype.interfaces.io as nio   # Data i/o
    import nipype.pipeline.engine as pe  # pypeline engine
    from nipype.interfaces.freesurfer import ReconAll

    from nipype.utils.misc import package_check
    # package_check('nipype', '5.4', 'tutorial1') ## HACK: Check nipype version
    package_check('numpy', '1.3', 'tutorial1')
    package_check('scipy', '0.7', 'tutorial1')
    package_check('networkx', '1.0', 'tutorial1')
    package_check('IPython', '0.10', 'tutorial1')

    ## Check to ensure that SimpleITK can be found
    import SimpleITK as sitk
    #\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/
    #####################################################################################
    #  FreeSurfer is extraordinarily finicky and is easily confused and incorrect.
    #  Force that all the FREESURFER env vars are set in subsequent scripts by
    #  ensuring that rough versions of these environmental variables are not
    #  set internal to this script.
    prohibited_env_var_exists = False
    for ENVVAR_TO_CHECK in ['FREESURFER_HOME', 'FSFAST_HOME', 'FSF_OUTPUT_FORMAT', 'SUBJECTS_DIR', 'MNI_DIR', 'FSL_DIR']:
        if ENVVAR_TO_CHECK in os.environ:
            prohibited_env_var_exists = True
            print("ERROR: Environmental Variable {0}={1} exists.  Please unset before continuing.".format(ENVVAR_TO_CHECK, os.environ[ENVVAR_TO_CHECK]))
    if prohibited_env_var_exists:
        sys.exit(-1)

    #\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/
    #    Define platform specific output write paths
    mountPrefix = expConfig.get(input_arguments.processingEnvironment, 'MOUNTPREFIX')
    BASEOUTPUTDIR = expConfig.get(input_arguments.processingEnvironment, 'BASEOUTPUTDIR')
    ExperimentBaseDirectoryPrefix = os.path.realpath(os.path.join(BASEOUTPUTDIR, ExperimentName))
    ExperimentBaseDirectoryCache = ExperimentBaseDirectoryPrefix + "_CACHE"
    ExperimentBaseDirectoryResults = ExperimentBaseDirectoryPrefix + "_Results"
    if not os.path.exists(ExperimentBaseDirectoryCache):
        os.makedirs(ExperimentBaseDirectoryCache)
    if not os.path.exists(ExperimentBaseDirectoryResults):
        os.makedirs(ExperimentBaseDirectoryResults)
    if not PreviousExperimentName is None:
        PreviousBaseDirectoryPrefix = os.path.realpath(os.path.join(BASEOUTPUTDIR, PreviousExperimentName))
        PreviousBaseDirectoryResults = PreviousBaseDirectoryPrefix + "_Results"
        assert os.path.exists(PreviousBaseDirectoryResults), "The previous experiment directory does not exist: {0}".format(PreviousBaseDirectoryResults)
    else:
        PreviousBaseDirectoryResults = None
    #    Define workup common reference data sets
    #    The ATLAS needs to be copied to the ExperimentBaseDirectoryPrefix
    #    The ATLAS pathing must stay constant
    ATLASPATH = expConfig.get(input_arguments.processingEnvironment, 'ATLASPATH')
    if not os.path.exists(ATLASPATH):
        print("ERROR:  Invalid Path for Atlas: {0}".format(ATLASPATH))
        sys.exit(-1)
    CACHE_ATLASPATH = os.path.realpath(os.path.join(ExperimentBaseDirectoryCache, 'Atlas'))
    from distutils.dir_util import copy_tree
    if not os.path.exists(CACHE_ATLASPATH):
        print("Copying a reference of the atlas to the experiment cache directory:\n    from: {0}\n    to: {1}".format(ATLASPATH, CACHE_ATLASPATH))
        copy_tree(ATLASPATH, CACHE_ATLASPATH, preserve_mode=1, preserve_times=1)
        ## Now generate the xml file with the correct pathing
        file_replace(os.path.join(ATLASPATH, 'ExtendedAtlasDefinition.xml.in'), os.path.join(CACHE_ATLASPATH, 'ExtendedAtlasDefinition.xml'), "@ATLAS_DIRECTORY@", CACHE_ATLASPATH)
    else:
        print("Atlas already exists in experiment cache directory: {0}".format(CACHE_ATLASPATH))

    CUSTOM_ENVIRONMENT = expConfig.get(input_arguments.processingEnvironment, 'CUSTOM_ENVIRONMENT')
    CUSTOM_ENVIRONMENT = eval(CUSTOM_ENVIRONMENT)
    ## Set custom environment variables so that subprocesses work properly (i.e. for FreeSurfer)
    # print CUSTOM_ENVIRONMENT
    for key, value in CUSTOM_ENVIRONMENT.items():
        # print "SETTING: ", key, value
        os.putenv(key, value)
        os.environ[key] = value
    # print os.environ
    # sys.exit(-1)

    WORKFLOW_COMPONENTS_STRING = expConfig.get('EXPERIMENT_DATA', 'WORKFLOW_COMPONENTS')
    WORKFLOW_COMPONENTS = eval(WORKFLOW_COMPONENTS_STRING)

    ## If freesurfer is requested, then ensure that a sane environment is available
    if 'FREESURFER' in WORKFLOW_COMPONENTS:
        print "FREESURFER NEEDS TO CHECK FOR SANE ENVIRONMENT HERE."

    ## Setup environment for CPU load balancing of ITK based programs.
    total_CPUS = multiprocessing.cpu_count()
    if input_arguments.wfrun == 'helium_all.q' or \
      input_arguments.wfrun == 'helium_all.q_graph' or \
      input_arguments.wfrun == 'ipl_OSX':
        assert expConfig.getboolean(input_arguments.processingEnvironment, 'CLUSTER'), "CLUSTER section not set to true!"
        CLUSTER_QUEUE, CLUSTER_QUEUE_LONG, QSTAT_IMMEDIATE_EXE, QSTAT_CACHED_EXE, MODULES = get_cluster_settings(expConfig)
    elif input_arguments.wfrun == 'local_4':
        os.environ['NSLOTS'] = "{0}".format(total_CPUS / 4)
    elif input_arguments.wfrun == 'local_12':
        os.environ['NSLOTS'] = "{0}".format(total_CPUS / 12)
    elif input_arguments.wfrun == 'local':
        # HACK
        CLUSTER_QUEUE, CLUSTER_QUEUE_LONG, QSTAT_IMMEDIATE_EXE, QSTAT_CACHED_EXE, MODULES = get_cluster_settings(expConfig)
        # END HACK
        os.environ['NSLOTS'] = "{0}".format(total_CPUS / 1)
    elif input_arguments.wfrun == 'ds_runner':
        os.environ['NSLOTS'] = "{0}".format(total_CPUS / 1)
    else:
        print "FAILED RUN: You must specify the run environment type. [helium_all.q,helium_all.q_graph,ipl_OSX,local_4,local_12,local,ds_runner]"
        print input_arguments.wfrun
        sys.exit(-1)

    print "Configuring Pipeline"
    ## Ensure that entire db is built and cached before parallel section starts.
    _ignoreme = OpenSubjectDatabase(ExperimentBaseDirectoryCache, [ "all" ], mountPrefix, subject_data_file)
    to_do_subjects = input_arguments.subject.split(',')
    if to_do_subjects[0] == "all":
        to_do_subjects=_ignoreme.getAllSubjects()
    _ignoreme = None

    ## Create the shell wrapper script for ensuring that all jobs running on remote hosts from SGE
    #  have the same environment as the job submission host.

    JOB_SCRIPT = get_global_sge_script(sys.path, os.environ['PATH'], CUSTOM_ENVIRONMENT, MODULES)
    print JOB_SCRIPT

    # Randomly shuffle to_do_subjects to get max
    import random
    random.shuffle(to_do_subjects)

    ## Make a list of all the arguments to be processed
    sp_args_list = list()
    start_time=time.time()
    subj_index = 1
    for subjectid in to_do_subjects:
        delay = 2.5*subj_index
        subj_index += 1
        print("START DELAY: {0}".format(delay))
        sp_args = (CACHE_ATLASPATH, CLUSTER_QUEUE, CLUSTER_QUEUE_LONG, QSTAT_IMMEDIATE_EXE, QSTAT_CACHED_EXE,
                   ExperimentBaseDirectoryCache, ExperimentBaseDirectoryResults, subject_data_file,
                   GLOBAL_DATA_SINK_REWRITE, JOB_SCRIPT, WORKFLOW_COMPONENTS, input_arguments,
                   mountPrefix, start_time + delay, subjectid, PreviousBaseDirectoryResults)
        sp_args_list.append(sp_args)
    if 'local' in input_arguments.wfrun:
        print("RUNNING WITHOUT POOL BUILDING")
        for sp_args in sp_args_list:
            DoSingleSubjectProcessing(sp_args)
    else:
        ## Make a pool of workers to submit simultaneously
        from multiprocessing import Pool
        myPool = Pool(processes=64, maxtasksperchild=1)
        all_results = myPool.map_async(DoSingleSubjectProcessing, sp_args_list).get(1e100)

        for indx in range(0, len(sp_args_list)):
            if all_results[indx] == False:
                print "FAILED for {0}".format(sp_args_list[indx][-1])

    print("THIS RUN OF BAW FOR SUBJS {0} HAS COMPLETED".format(to_do_subjects))
    return 0
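
setDataSinkRewriteValue, used near the top of this example, reconciles the -rewrite_datasinks flag with the GLOBAL_DATA_SINK_REWRITE config entry; a sketch under the assumption that the command-line flag simply overrides the configuration default:

def setDataSinkRewriteValue(commandLineValue, configFileValue):
    # The command-line flag forces rewriting; otherwise defer to the config file.
    if commandLineValue:
        return True
    return configFileValue
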
Esempio n. 43
0
def setup_environment(argv):
    """
    This function...
    :param argv:
    :return: environment, experiment, pipeline, cluster
    """
    print("Configuring environment...")
    import os
    import os.path
    from BAW.utilities.configFileParser import resolveDataSinkOption, parseFile
    from BAW.utilities.pathHandling import validatePath
    from BAW.utilities import misc
    from collections import OrderedDict  # Need OrderedDict internally to ensure consistent ordering
    environment, experiment, pipeline, cluster = parseFile(
        argv["--ExperimentConfig"], argv["--pe"], argv["--workphase"])
    pipeline['ds_overwrite'] = resolveDataSinkOption(argv, pipeline)
    if cluster is None:
        print("Running on local")
        # raise NotImplementedError("Running local has old code and has not been tested!")
        # assert argv["--wfrun"] in argvWFRUN, \
        #    "wfrun  options for clusters can only be given when the configuration file's CLUSTER option == True"
        # os.environ['NSLOTS'] = str(misc.get_cpus(argv["--wf_template_runner"]))
    else:
        load_modules(cluster['modules']
                     )  # Load modules if not already done  ## MODS PATH
        # print os.environ['LOADEDMODULES']
        # if environment['virtualenv_dir'] is not None:  # MODS PATH
        # activate_this = validatePath(
        #    os.path.join(environment['virtualenv_dir'], 'bin', 'activate_this.py'), False, False)
        # if os.path.exists( activate_this ) :
        #    exec(open(activate_this).read(), OrderedDict(__file__=activate_this))
    utilities_path = os.path.join(os.path.dirname(os.path.abspath(__file__)),
                                  'utilities')
    configure_env = validatePath(
        os.path.join(utilities_path, 'configure_env.py'), False, False)
    # Add the AutoWorkup directory to the PYTHONPATH every time - REQUIRED FOR CLUSTER DISPATCHING
    environment['env']['PYTHONPATH'] = environment['env'][
        'PYTHONPATH'] + ":" + os.path.dirname(__file__)

    exec(
        open(configure_env).read(),
        OrderedDict(
            __file__=__file__,
            append_os_path=environment['env']['PATH'],
            append_sys_path=environment['env']['PYTHONPATH']))  # MODS PATH

    print(("@" * 80))
    print((environment['env']['PYTHONPATH']))
    print(("@" * 80))
    print((environment['env']['PATH']))
    print(("@" * 80))

    from nipype import config
    config.enable_debug_mode()
    # config.enable_provenance()

    from BAW.utilities.package_check import verify_packages
    verify_packages()
    if 'FREESURFER' in experiment['components']:  # FREESURFER MODS
        configure_FS = validatePath(
            os.path.join(utilities_path, 'utilities', 'configure_FS.py'),
            False, False)
        exec(
            open(configure_FS).read(),
            OrderedDict(FS_VARS=misc.FS_VARS, env=environment['env']))
        print("FREESURFER needs to check for sane environment here!"
              )  # TODO: raise warning, write method, what???
    for key, value in list(environment['env'].items()):
        if key in ['PATH', 'PYTHONPATH'] + misc.FS_VARS:
            pass
        else:
            os.environ[key] = value  # Do not use os.putenv (see Python documentation)
    return environment, experiment, pipeline, cluster
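
setup_environment relies on a validatePath helper from BAW.utilities.pathHandling. Assuming the second argument allows an empty path and the third marks the path as a directory, a minimal sketch (not the project's actual implementation) might be:

import os

def validatePath(path, allow_empty, isDirectory):
    # Assumed semantics: resolve the path and fail loudly if it does not exist.
    if not path:
        if allow_empty:
            return None
        raise ValueError("Empty path is not allowed")
    full_path = os.path.realpath(path)
    if isDirectory and not os.path.isdir(full_path):
        raise IOError("Directory not found: {0}".format(full_path))
    if not isDirectory and not os.path.isfile(full_path):
        raise IOError("File not found: {0}".format(full_path))
    return full_path
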
Esempio n. 44
0
def run_examples(example, pipelines, data_path, plugin=None):
    '''
    Run example workflows
    '''

    # Import packages
    from nipype import config
    from nipype.interfaces.base import CommandLine
    from nipype.utils import draw_gantt_chart
    from nipype.pipeline.plugins import log_nodes_cb

    if plugin is None:
        plugin = 'MultiProc'

    print('running example: %s with plugin: %s' % (example, plugin))
    config.enable_debug_mode()
    config.enable_provenance()
    CommandLine.set_default_terminal_output("stream")

    plugin_args = {}
    if plugin == 'MultiProc':
        plugin_args['n_procs'] = cpu_count()

    __import__(example)

    for pipeline in pipelines:
        # Init and run workflow
        wf = getattr(sys.modules[example], pipeline)
        wf.base_dir = os.path.join(os.getcwd(), 'output', example, plugin)
        if os.path.exists(wf.base_dir):
            rmtree(wf.base_dir)

        # Handle a logging directory
        log_dir = os.path.join(os.getcwd(), 'logs', example)
        if os.path.exists(log_dir):
            rmtree(log_dir)
        os.makedirs(log_dir)
        wf.config = {
            'execution': {
                'hash_method': 'timestamp',
                'stop_on_first_rerun': 'true',
                'write_provenance': 'true'
            }
        }

        # Callback log setup
        if example == 'fmri_spm_nested' and plugin == 'MultiProc' and \
           pipeline == 'l2pipeline':
            # Init callback log
            import logging
            cb_log_path = os.path.join(os.path.expanduser('~'), 'callback.log')
            cb_logger = logging.getLogger('callback')
            cb_logger.setLevel(logging.DEBUG)
            handler = logging.FileHandler(cb_log_path)
            cb_logger.addHandler(handler)
            plugin_args = {'n_procs': 4, 'status_callback': log_nodes_cb}
        else:
            plugin_args = {'n_procs': 4}
        try:
            wf.inputs.inputnode.in_data = os.path.abspath(data_path)
        except AttributeError:
            pass  # the workflow does not have inputnode.in_data

        wf.run(plugin=plugin, plugin_args=plugin_args)

        # Draw gantt chart only if pandas is installed
        try:
            import pandas
            pandas_flg = True
        except ImportError as exc:
            pandas_flg = False

        if 'status_callback' in plugin_args and pandas_flg:
            draw_gantt_chart.generate_gantt_chart(cb_log_path, 4)
            dst_log_html = os.path.join(os.path.expanduser('~'),
                                        'callback.log.html')
            copyfile(cb_log_path + '.html', dst_log_html)
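
A hypothetical invocation of run_examples defined above; the example name, pipeline attribute and data path are placeholders:

if __name__ == '__main__':
    # Runs the l2pipeline workflow of the fmri_spm_nested example with MultiProc.
    run_examples('fmri_spm_nested', ['l2pipeline'], '/path/to/nipype-tutorial/data',
                 plugin='MultiProc')
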
def PreprocessingMALF( WFName,
                      inputDictionaries,
                      inputAtlasFilename,
                      outputDir
                    ):

    from nipype import config
    config.enable_debug_mode()
    print """ Run WarpSubjectsIntoAtlasWF"""
    import os
    import nipype.interfaces.io as nio
    import nipype.pipeline.engine as pe
    from nipype.interfaces.utility import Function, IdentityInterface
    from PipeLineFunctionHelpers import  POSTERIORS
    from WorkupT1T2TissueClassify import MakePosteriorDictionaryFunc

    PreprocessingWF= pe.Workflow( name=WFName)
    PreprocessingWF.base_dir = outputDir

    #inputAtlasFilename = inputAtlasDir+ "template_t1_clipped.nii.gz"

    """
      Clip Brain 
    """

    """
      - extract image list from dictionaries 
    """
    InputImageListND = pe.Node( interface = Function( function = ExtractImageList,
                                                     input_names = ['inputDictionaries', 
                                                                    'desiredKeys'],
                                                     output_names = ['outputImageList',
                                                                     'outputTypeList']),
                               name='InputImageList')
    InputImageListND.inputs.inputDictionaries = inputDictionaries
    InputImageListND.inputs.desiredKeys = ['baselineImage', 'additionalImages', 'labelMap']

    BrainClipND = pe.MapNode( interface = Function( function = ClipVolumeWithMask,
                                                    input_names = ['inputVolumeFilename',
                                                                   'inputMaskFilename',
                                                                   'inputType',
                                                                   'outputVolumeFilename'] ,
                                                    output_names = ['outputType',
                                                                    'outputVolume']),
                              iterfield=['inputVolumeFilename', 'inputType'],
                              name="BrainClip")

    BrainClipND.inputs.inputMaskFilename = inputDictionaries[ 'brainMask' ] 
    PreprocessingWF.connect( [ (InputImageListND, BrainClipND, [ ('outputImageList', 'inputVolumeFilename') ] ),
                               (InputImageListND, BrainClipND, [ ('outputTypeList', 'inputType') ] ) 
                           ] )
    BrainClipND.inputs.outputVolumeFilename = "SubjectBrainClipped.nii.gz"

    """
      Reconstruct input Dictionaries
    """
    ReconstructDictND = pe.Node( interface = Function( function = ReconstructDictionary,
                                                       input_names = [ 'inputKeys','inputElements'],
                                                       output_names = [ 'outputDictionary'] ),
                                 name = 'ReconstructDict' )

    PreprocessingWF.connect( BrainClipND, 'outputType',
                             ReconstructDictND, 'inputKeys')
    PreprocessingWF.connect( BrainClipND, 'outputVolume',
                             ReconstructDictND, 'inputElements')

    """
      Warp all the subject image into atlas space
    """
    WarpSubjectToAtlasND = pe.Node( interface = Function( function = WarpSubjectToAtlas,
                                                          input_names = [ 'WFName', 
                                                                          'inputDictionaries',
                                                                          'inputAtlasFilename',
                                                                          'outputDirectory' ],
                                                          output_names = [] ),
                                    name = 'WarpSubjectToAtlas' )

    WarpSubjectToAtlasND.inputs.WFName = 'WarpSubjectToAtlasWF'

    PreprocessingWF.connect( ReconstructDictND, 'outputDictionary',
                             WarpSubjectToAtlasND, 'inputDictionaries') 

    WarpSubjectToAtlasND.inputs.inputAtlasFilename = inputAtlasFilename
    WarpSubjectToAtlasND.inputs.outputDirectory = '.'

    PreprocessingWF.write_graph()
    PreprocessingWF.run()
Esempio n. 46
0
def main(argv=None):
    import argparse
    import ConfigParser
    import csv
    import string
    import os
    import sys

    if argv is None:
        argv = sys.argv

    # Create and parse input arguments
    parser = argparse.ArgumentParser(description='Runs a mini version of BuildTemplateParallel')
    group = parser.add_argument_group('Required')
    group.add_argument('-pe', action="store", dest='processingEnvironment', required=True,
                       help='The name of the processing environment to use from the config file')
    group.add_argument('-wfrun', action="store", dest='wfrun', required=True,
                       help='The name of the workflow running plugin to use')
    group.add_argument('-ExperimentConfig', action="store", dest='ExperimentConfig', required=True,
                       help='The path to the file that describes the entire experiment')
    input_arguments = parser.parse_args()


    expConfig = ConfigParser.ConfigParser()
    expConfig.read(input_arguments.ExperimentConfig)

    # Experiment specific information
    session_db=expConfig.get('EXPERIMENT_DATA','SESSION_DB')
    ExperimentName=expConfig.get('EXPERIMENT_DATA','EXPERIMENTNAME')

    # Platform specific information
    #     Prepend the python search paths
    PYTHON_AUX_PATHS=expConfig.get(input_arguments.processingEnvironment,'PYTHON_AUX_PATHS')
    PYTHON_AUX_PATHS=PYTHON_AUX_PATHS.split(':')
    PYTHON_AUX_PATHS.extend(sys.path)
    sys.path=PYTHON_AUX_PATHS
    #     Prepend the shell environment search paths
    PROGRAM_PATHS=expConfig.get(input_arguments.processingEnvironment,'PROGRAM_PATHS')
    PROGRAM_PATHS=PROGRAM_PATHS.split(':')
    PROGRAM_PATHS.extend(os.environ['PATH'].split(':'))
    os.environ['PATH']=':'.join(PROGRAM_PATHS)
    #    Define platform specific output write paths
    BASEOUTPUTDIR=expConfig.get(input_arguments.processingEnvironment,'BASEOUTPUTDIR')
    ExperimentBaseDirectoryPrefix=os.path.realpath(os.path.join(BASEOUTPUTDIR,ExperimentName))
    ExperimentBaseDirectoryCache=ExperimentBaseDirectoryPrefix+"_CACHE"
    ExperimentBaseDirectoryResults=ExperimentBaseDirectoryPrefix +"_Results"
    if not os.path.exists(ExperimentBaseDirectoryCache):
        os.makedirs(ExperimentBaseDirectoryCache)
    if not os.path.exists(ExperimentBaseDirectoryResults):
        os.makedirs(ExperimentBaseDirectoryResults)

    print os.environ
    #sys.exit(-1)

    CLUSTER_QUEUE=expConfig.get(input_arguments.processingEnvironment,'CLUSTER_QUEUE')

    ## Setup environment for CPU load balancing of ITK based programs.
    import multiprocessing
    total_CPUS=multiprocessing.cpu_count()
    if input_arguments.wfrun == 'helium_all.q':
        pass
    elif input_arguments.wfrun == 'helium_all.q_graph':
        pass
    elif input_arguments.wfrun == 'ipl_OSX':
        pass
    elif input_arguments.wfrun == 'local_4':
        os.environ['NSLOTS']="{0}".format(total_CPUS/4)
    elif input_arguments.wfrun == 'local_3':
        os.environ['NSLOTS']="{0}".format(total_CPUS/3)
    elif input_arguments.wfrun == 'local_12':
        os.environ['NSLOTS']="{0}".format(total_CPUS/12)
    elif input_arguments.wfrun == 'local':
        os.environ['NSLOTS']="{0}".format(total_CPUS/1)
    else:
        print "You must specify the run environment type. [helium_all.q,helium_all.q_graph,ipl_OSX,local_3,local_4,local_12,local]"
        print input_arguments.wfrun
        sys.exit(-1)

    print "Configuring Pipeline"
    from nipype import config  ## NOTE:  This needs to occur AFTER the PYTHON_AUX_PATHS has been modified
    config.enable_debug_mode() ## NOTE:  This needs to occur AFTER the PYTHON_AUX_PATHS has been modified
    import buildTemplateParallelDriver ## NOTE:  This needs to occur AFTER the PYTHON_AUX_PATHS has been modified
    btp=buildTemplateParallelDriver.BuildTemplateParallelWorkFlow(
      ExperimentBaseDirectoryCache,
      ExperimentBaseDirectoryResults,
      session_db)
    print "Start Processing"

    ## Create the shell wrapper script for ensuring that all jobs running on remote hosts from SGE
    #  have the same environment as the job submission host.
    JOB_SCRIPT=get_global_sge_script(sys.path,PROGRAM_PATHS)
    print JOB_SCRIPT
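    # The generated wrapper is assumed to be a small bash preamble that re-exports
    # PYTHONPATH and PATH so SGE-dispatched jobs match the submission host, roughly:
    #   #!/bin/bash
    #   export PYTHONPATH=<':'-joined sys.path entries>
    #   export PATH=<':'-joined PROGRAM_PATHS entries>
    # (sketch only; the real template comes from get_global_sge_script)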

    SGEFlavor='SGE'
    if input_arguments.wfrun == 'helium_all.q':
        btp.run(plugin=SGEFlavor,
            plugin_args=dict(template=JOB_SCRIPT,qsub_args="-S /bin/bash -pe smp1 1-4 -l mem_free=4000M -o /dev/null -e /dev/null "+CLUSTER_QUEUE))
    elif input_arguments.wfrun == 'helium_all.q_graph':
        SGEFlavor='SGEGraph' #Use the SGEGraph processing
        btp.run(plugin=SGEFlavor,
            plugin_args=dict(template=JOB_SCRIPT,qsub_args="-S /bin/bash -pe smp1 1-4 -l mem_free=4000M -o /dev/null -e /dev/null "+CLUSTER_QUEUE))
    elif input_arguments.wfrun == 'ipl_OSX':
        btp.write_graph()
        print "Running On ipl_OSX"
        btp.run(plugin=SGEFlavor,
            plugin_args=dict(template=JOB_SCRIPT,qsub_args="-S /bin/bash -pe smp1 1-4 -l mem_free=4000M -o /dev/null -e /dev/null "+CLUSTER_QUEUE))
    elif input_arguments.wfrun == 'local_4':
        btp.write_graph()
        print "Running with 4 parallel processes on local machine"
        btp.run(plugin='MultiProc', plugin_args={'n_procs' : 4})
    elif input_arguments.wfrun == 'local_3':
        btp.write_graph()
        print "Running with 3 parallel processes on local machine"
        btp.run(plugin='MultiProc', plugin_args={'n_procs' : 3})
    elif input_arguments.wfrun == 'local_12':
        btp.write_graph()
        print "Running with 12 parallel processes on local machine"
        btp.run(plugin='MultiProc', plugin_args={'n_procs' : 12})
    elif input_arguments.wfrun == 'local':
        try:
            btp.write_graph()
        except:
            pass
        print "Running sequentially on local machine"
        btp.run()
    else:
        print "You must specify the run environment type. [helium_all.q,helium_all.q_graph,ipl_OSX,local_3,local_4,local_12,local]"
        print input_arguments.wfrun
        sys.exit(-1)
def unitWorkUp ( configurationFilename, 
                 doApply = False,
                 baseDir = "."):
    import os
    import sys
    import nipype.pipeline.engine as pe
    from nipype.interfaces.utility import Function
    import ConfigurationParser
    import crossValidationUnit as this
    
    from nipype import config
    config.enable_debug_mode()
    
    workflow = pe.Workflow( name = 'balancedTraining' )
    workflow.base_dir = baseDir
    
    configurationMap = ConfigurationParser.ConfigurationSectionMap( configurationFilename) 
    Options          = configurationMap[ 'Options' ]
    roiDict          = Options[ 'roiBooleanCreator'.lower() ]

    #
    #-------------------------------- filenameGeneratorND is dummy node
    # to create proper probability file location for nipype
    #

    filenameGeneratorND = pe.Node( name      = "filenameGeneratorND",
                                   interface = Function( 
                                      input_names  = ['roiList',
                                                      'gaussianSigma'],
                                      output_names = ['probabilityMapFilename'],
                                      function     = this.getProbabilityMapFilename )
                                 )
    filenameGeneratorND.inputs.roiList = roiDict.keys()
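    # Assumed behaviour only (the real helper lives in crossValidationUnit):
    # getProbabilityMapFilename is expected to return one probability-map filename
    # per ROI, keyed by ROI name, e.g.
    #   def getProbabilityMapFilename(roiList, gaussianSigma):
    #       import os
    #       return dict((roi, os.path.abspath(roi + '_ProbabilityMap.nii.gz'))
    #                   for roi in roiList)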

    #
    #--------------------------------  start from generate probability
    #
    probabilityMapGeneratorND = pe.Node( name = "probabilityMapGeneratorND",
                                         interface = Function( 
                                             input_names = ['configurationFilename',
                                                            'probabilityMapDict',
                                                            'gaussianSigma',
                                                            'outputXmlFilename'],
                                             output_names = [ 'probabilityMapDict',
                                                              'outputXmlFilename',
                                                              'outputConfigurationFilename'],
                                             function     = ConfigurationParser.BRAINSCutGenerateProbabilityMap )
                                       )
    
    probabilityMapGeneratorND.inputs.outputXmlFilename = 'netConfiguration.xml'
    probabilityMapGeneratorND.inputs.configurationFilename = configurationFilename 
    probabilityMapGeneratorND.inputs.gaussianSigma = Options[ 'gaussianSigma'.lower() ]
    
    workflow.connect( filenameGeneratorND, 'probabilityMapFilename',
                      probabilityMapGeneratorND, 'probabilityMapDict' )
    
    #
    #--------------------------------  create vectors for each ROI
    #
    configFileND = pe.Node( name = "configFileND",
                            interface = Function(
                                input_names = ['originalFilename',
                                               'editedFilenamePrefix' ],
                                output_names = ['editedFilenames'],
                                function     = ConfigurationParser.ConfigurationFileEditor ) 
                          )
    
    configFileND.inputs.originalFilename = configurationFilename  
    configFileND.inputs.editedFilenamePrefix = 'ROI'
    workflow.add_nodes( [ configFileND ] )
    
    vectorCreatorND = pe.MapNode( name = "vectorCreatorND", 
                                  interface = Function(
                                      input_names = ['configurationFilename',
                                                     'probabilityMapDict',
                                                     'normalization',
                                                     'outputXmlFilename',
                                                     'outputVectorFilename'],
                                      output_names = ['outputVectorFilename',
                                                      'outputVectorHdrFilename',
                                                      'outputNormalization',
                                                      'outputXmlFilename'],
                                      function     = ConfigurationParser.BRAINSCutCreateVector ),
                                  iterfield = [ 'configurationFilename']
                                )
    vectorCreatorND.inputs.outputVectorFilename = 'oneROIVectorFile.txt'
    vectorCreatorND.inputs.outputXmlFilename = 'oneROICreateVectorNetConfiguration.xml'
    import ast
    normalizationOption = Options[ 'normalization'.lower()]  
    #normalizationOption = ast.literal_eval( Options[ 'normalization'.lower()]  )
    print( """Normalization Option: {str}
           """.format( str=normalizationOption ) )
    vectorCreatorND.iterables = ( 'normalization', normalizationOption )
    #
    #--------------------------------  workflow connections
    #
    workflow.connect( configFileND, 'editedFilenames',
                      vectorCreatorND, 'configurationFilename' )
    workflow.connect( probabilityMapGeneratorND, 'probabilityMapDict',
                      vectorCreatorND, 'probabilityMapDict' )
    
    #
    #--------------------------------  balance and combine each ROI vectors
    #
    balanceND = pe.Node( name = "balanceND",
                        interface = Function(
                            input_names = ['inputVectorFilenames'],
                            output_names = ['outputVectorFilenames',
                                            'outputVectorHdrFilenames'],
                            function = ConfigurationParser.BalanceInputVectors )
                      )
    workflow.connect( vectorCreatorND, 'outputVectorFilename',
                      balanceND, 'inputVectorFilenames' )
    
    combineND = pe.Node( name = "combineND",
                         interface = Function(
                            input_names = ['inputVectorFilenames',
                                           'outputVectorFilename'],
                            output_names = ['outputVectorFilename',
                                            'outputVectorHdrFilename'],
                            function = ConfigurationParser.CombineInputVectors )
                       )
    workflow.connect( balanceND, 'outputVectorFilenames',
                      combineND, 'inputVectorFilenames')
    
    combineND.inputs.outputVectorFilename = 'allCombinedVector.txtANN'
    
    #
    #--------------------------------  train
    #
    trainND = pe.Node( name = "trainND", 
                       interface = Function( 
                           input_names = ['configurationFilename',
                                          'inputVectorFilename',
                                          'outputModelFilenamePrefix',
                                          'outputXmlFilename',
                                          'methodParameter'],
                           output_names = ['outputTrainedModelFilename',
                                           'outputMethodParameter'],
                           function = ConfigurationParser.BRAINSCutTrainModel ),
                     )
    #methodParameter = { '--method': 'RandomForest',
    #                    '--numberOfTrees': 60,
    #                    '--randomTreeDepth ': 60 }
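    # The commented dictionary above illustrates the shape of one methodParameter
    # entry; setting iterables below expands trainND into one training run per
    # entry of the configuration file's 'modelParameter' option.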
    import ast
    methodFromConfigFile = Options['modelParameter'.lower()]
    trainND.iterables = ( 'methodParameter', methodFromConfigFile )
    trainND.inputs.outputXmlFilename = 'trainNetConfiguration.xml'
    trainND.inputs.outputModelFilenamePrefix = 'trainModelFile.txt'
    
    workflow.connect( probabilityMapGeneratorND, 'outputConfigurationFilename',
                      trainND, 'configurationFilename')
    workflow.connect( combineND, 'outputVectorFilename',
                      trainND, 'inputVectorFilename')
    
    #
    #--------------------------------  apply
    #
    # make output dir for each subject as a 
    if doApply:
        applyND = pe.Node( name = "applyND", 
                           interface = Function( 
                               input_names = ['configurationFilename',
                                              'probabilityMapDict',
                                              'normalization',
                                              'inputModelFilename',
                                              'methodParameter',
                                              'outputXmlFilename'
                                              ],
                               output_names = ['outputLabelDict'],
                               function = ConfigurationParser.BRAINSCutApplyModel )
                         )
        #methodParameter = { '--method': 'RandomForest',
        #                    '--numberOfTrees': 60,
        #                    '--randomTreeDepth ': 60 }
        applyND.inputs.outputXmlFilename = 'applyConfiguration.xml'
        workflow.connect( probabilityMapGeneratorND, 'outputConfigurationFilename',
                          applyND, 'configurationFilename')
        workflow.connect( vectorCreatorND, 'outputNormalization',
                          applyND, 'normalization' )
        workflow.connect( probabilityMapGeneratorND, 'probabilityMapDict',
                          applyND, 'probabilityMapDict' )
        workflow.connect( trainND, 'outputTrainedModelFilename',
                          applyND, 'inputModelFilename' )
        workflow.connect( trainND, 'outputMethodParameter',
                          applyND, 'methodParameter' )
        #
        # analysis
        #
        #analysisND = pe.Node( name = "analysisND",
        #                      interface = Function(
        #                          input_names['inputImageDict',
        #                                      'inputManualDict',
        #                                      'outputCSVFilename'],
        #                          output_names['outputCSVFilename'],
        #                          function = analysis.similarityFromApplyOutput )
        #                    )

    #
    #
    ##workflow.run(updatehash=True)
    workflow.run()
Esempio n. 48
0
def run_examples(example, pipelines, data_path, plugin=None):
    '''
    Run example workflows
    '''

    # Import packages
    import os
    import sys
    from shutil import rmtree, copyfile
    from multiprocessing import cpu_count
    from nipype import config
    from nipype.interfaces.base import CommandLine
    from nipype.utils import draw_gantt_chart
    from nipype.pipeline.plugins import log_nodes_cb

    if plugin is None:
        plugin = 'MultiProc'

    print('running example: %s with plugin: %s' % (example, plugin))
    config.enable_debug_mode()
    config.enable_provenance()
    CommandLine.set_default_terminal_output("stream")

    plugin_args = {}
    if plugin == 'MultiProc':
        plugin_args['n_procs'] = cpu_count()

    __import__(example)
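    # sys.modules[example] now holds the freshly imported example module; each
    # pipeline name below is looked up on it with getattr to obtain a prebuilt
    # nipype Workflow.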

    for pipeline in pipelines:
        # Init and run workflow
        wf = getattr(sys.modules[example], pipeline)
        wf.base_dir = os.path.join(os.getcwd(), 'output', example, plugin)
        if os.path.exists(wf.base_dir):
            rmtree(wf.base_dir)

        # Handle a logging directory
        log_dir = os.path.join(os.getcwd(), 'logs', example)
        if os.path.exists(log_dir):
            rmtree(log_dir)
        os.makedirs(log_dir)
        wf.config = {'execution': {'hash_method': 'timestamp',
                                   'stop_on_first_rerun': 'true',
                                   'write_provenance': 'true'}}

        # Callback log setup
        if example == 'fmri_spm_nested' and plugin == 'MultiProc' and \
           pipeline == 'l2pipeline':
            # Init callback log
            import logging
            cb_log_path = os.path.join(os.path.expanduser('~'), 'callback.log')
            cb_logger = logging.getLogger('callback')
            cb_logger.setLevel(logging.DEBUG)
            handler = logging.FileHandler(cb_log_path)
            cb_logger.addHandler(handler)
            plugin_args = {'n_procs' : 4, 'status_callback' : log_nodes_cb}
        else:
            plugin_args = {'n_procs' : 4}
        try:
            wf.inputs.inputnode.in_data = os.path.abspath(data_path)
        except AttributeError:
            pass # the workflow does not have inputnode.in_data

        wf.run(plugin=plugin, plugin_args=plugin_args)

        # Draw gantt chart only if pandas is installed
        try:
            import pandas
            pandas_flg = True
        except ImportError as exc:
            pandas_flg = False

        if 'status_callback' in plugin_args and pandas_flg:
            draw_gantt_chart.generate_gantt_chart(cb_log_path, 4)
            dst_log_html = os.path.join(os.path.expanduser('~'), 'callback.log.html')
            copyfile(cb_log_path+'.html', dst_log_html)
def run_workflow():
    raise Exception("This code was not tested after refactoring to be used by "
                    "preprocessing_workflow.py.")
    config.enable_debug_mode()

    # ------------------ Specify variables
    ds_root = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))

    data_dir = ds_root
    output_dir = 'func_unwarp'
    working_dir = 'workingdirs/func_unwarp'

    subject_list = ['eddy']
    session_list = ['20170511']

    # ------------------ Input Files
    infosource = Node(IdentityInterface(fields=[
        'subject_id',
        'session_id',
    ]),
                      name="infosource")

    infosource.iterables = [
        ('session_id', session_list),
        ('subject_id', subject_list),
    ]
    # SelectFiles
    templates = {
        'funcs':
        'resampled-isotropic-1mm/sub-{subject_id}/ses-{session_id}/func/'
        'sub-{subject_id}_ses-{session_id}_'
        'task-*_bold_res-1x1x1_preproc.nii.gz',

        # Use *-roi for testing
        #    'task-curvetracing_run-01_bold_res-1x1x1_preproc-roi.nii.gz',
        'fmap_phasediff':
        'resampled-isotropic-1mm/sub-{subject_id}/ses-{session_id}/fmap/'
        'sub-{subject_id}_ses-{session_id}_phasediff_res-1x1x1_preproc'
        '.nii.gz',
        'fmap_magnitude':
        'resampled-isotropic-1mm/sub-{subject_id}/ses-{session_id}/fmap/'
        'sub-{subject_id}_ses-{session_id}_magnitude1_res-1x1x1_preproc'
        '.nii.gz',
        'fmap_mask':
        'transformed-manual-fmap-mask/sub-{subject_id}/ses-{session_id}/fmap/'
        'sub-{subject_id}_ses-{session_id}_'
        'magnitude1_res-1x1x1_preproc.nii.gz',
    }
    inputfiles = Node(nio.SelectFiles(templates, base_directory=data_dir),
                      name="input_files")

    # ------------------ Output Files
    # Datasink
    outputfiles = Node(nio.DataSink(base_directory=ds_root,
                                    container=output_dir,
                                    parameterization=True),
                       name="output_files")

    # Use the following DataSink output substitutions
    outputfiles.inputs.substitutions = [
        ('subject_id_', 'sub-'),
        ('session_id_', 'ses-'),
        ('/undistorted/', '/'),
        ('/undistorted_masks/', '/'),
        ('_unwarped.nii.gz', '.nii.gz'),
        ('phasediff_radians_unwrapped_mask', '_rec-unwrapped_phasediff'),
    ]
    outputfiles.inputs.regexp_substitutions = [
        (r'_fugue[0-9]+/', r'func/'), (r'_undistort_masks[0-9]+/', r'func/'),
        (r'_ses-([a-zA-Z0-9]*)_sub-([a-zA-Z0-9]*)', r'sub-\2/ses-\1')
    ]

    # -------------------------------------------- Create Pipeline

    workflow = Workflow(name='undistort',
                        base_dir=os.path.join(ds_root, working_dir))

    workflow.connect([(infosource, inputfiles, [('subject_id', 'subject_id'),
                                                ('session_id', 'session_id')])
                      ])

    undistort_flow = create_workflow()

    # Connect sub-workflow inputs
    workflow.connect([(inputfiles, undistort_flow, [
        ('subject_id', 'in.subject_id'),
        ('session_id', 'in.session_id'),
        ('fmap_phasediff', 'in.fmap_phasediff'),
        ('fmap_magnitude', 'in.fmap_magnitude'),
        ('fmap_mask', 'in.fmap_mask'),
    ]), (undistort_flow, outputfiles, [
        ('out.unwarped_file', 'undistorted'),
    ])])

    # NOTE: the two connections below predate the refactoring mentioned at the top
    # of this function: 'unwarped_file' is already connected via 'out.unwarped_file'
    # above, and undistort_masks is not defined in this version of the script.
    # workflow.connect(undistort_flow, 'unwarped_file', outputfiles,
    #                  'undistorted')
    # workflow.connect(undistort_masks, 'unwarped_file', outputfiles,
    #                  'undistorted_masks')

    workflow.stop_on_first_crash = True
    workflow.keep_inputs = True
    workflow.remove_unnecessary_outputs = False
    workflow.write_graph()
    workflow.run()
Esempio n. 50
0
def setup_environment(argv):
    print "Configuring environment..."
    import os
    import os.path
    from utilities.configFileParser import resolveDataSinkOption, parseFile
    from utilities.pathHandling import validatePath
    from utilities import misc

    environment, experiment, pipeline, cluster = parseFile(
        argv["--ExperimentConfig"], argv["--pe"], argv["--workphase"]
    )
    pipeline["ds_overwrite"] = resolveDataSinkOption(argv, pipeline)
    if cluster is None:
        print "Running on local"
        # raise NotImplementedError("Running local has old code and has not been tested!")
        # assert argv["--wfrun"] in argvWFRUN, \
        #    "wfrun  options for clusters can only be given when the configuration file's CLUSTER option == True"
        # os.environ['NSLOTS'] = str(misc.get_cpus(argv["--wf_template_runner"]))
    else:
        load_modules(cluster["modules"])  # Load modules if not already done  ## MODS PATH
        # print os.environ['LOADEDMODULES']
    if environment["virtualenv_dir"] is not None:  # MODS PATH
        activate_this = validatePath(
            os.path.join(environment["virtualenv_dir"], "bin", "activate_this.py"), False, False
        )
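        # Standard virtualenv 'activate_this.py' idiom: executing the script
        # activates the configured environment inside the current interpreter.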
        execfile(activate_this, dict(__file__=activate_this))
    utilities_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), "utilities")
    configure_env = validatePath(os.path.join(utilities_path, "configure_env.py"), False, False)
    # Add the AutoWorkup directory to the PYTHONPATH every time - REQUIRED FOR CLUSTER DISPATCHING
    environment["env"]["PYTHONPATH"] = environment["env"]["PYTHONPATH"] + ":" + os.path.dirname(__file__)

    execfile(
        configure_env,
        dict(
            __file__=__file__,
            append_os_path=environment["env"]["PATH"],
            append_sys_path=environment["env"]["PYTHONPATH"],
        ),
    )  # MODS PATH

    print ("@" * 80)
    print environment["env"]["PYTHONPATH"]
    print ("@" * 80)
    print environment["env"]["PATH"]
    print ("@" * 80)

    from nipype import config

    config.enable_debug_mode()
    from utilities.package_check import verify_packages

    verify_packages()
    if "FREESURFER" in experiment["components"]:  # FREESURFER MODS
        configure_FS = validatePath(os.path.join(utilities_path, "utilities", "configure_FS.py"), False, False)
        execfile(configure_FS, dict(FS_VARS=misc.FS_VARS, env=environment["env"]))
        print "FREESURFER needs to check for sane environment here!"  # TODO: raise warning, write method, what???
    for key, value in environment["env"].items():
        if key in ["PATH", "PYTHONPATH"] + misc.FS_VARS:
            pass
        else:
            os.environ[key] = value  # Do not use os.putenv (see Python documentation)
    return environment, experiment, pipeline, cluster
Esempio n. 51
0
from __future__ import print_function

__author__ = 'johnsonhj'

######################################################################################
###### Now ensure that all the required packages can be read in from this custom path
# \/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/
# print sys.path
from nipype import config  # NOTE:  This needs to occur AFTER the PYTHON_AUX_PATHS has been modified

config.enable_debug_mode()  # NOTE:  This needs to occur AFTER the PYTHON_AUX_PATHS has been modified
# config.enable_provenance()

##############################################################################
from nipype.interfaces.base import CommandLine, CommandLineInputSpec, TraitedSpec, File, Directory
from nipype.interfaces.base import traits, isdefined, BaseInterface
from nipype.interfaces.utility import Merge, Split, Function, Rename, IdentityInterface
import nipype.interfaces.io as nio  # Data i/o
import nipype.pipeline.engine as pe  # pypeline engine
from nipype.interfaces.freesurfer import ReconAll

from nipype.utils.misc import package_check

# package_check('nipype', '5.4', 'tutorial1') ## HACK: Check nipype version
package_check('numpy', '1.3', 'tutorial1')
package_check('scipy', '0.7', 'tutorial1')
package_check('networkx', '1.0', 'tutorial1')
package_check('IPython', '0.10', 'tutorial1')

import os
Esempio n. 52
0
from __future__ import print_function
__author__ = 'johnsonhj'

######################################################################################
###### Now ensure that all the required packages can be read in from this custom path
#\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/
# print sys.path
from nipype import config  # NOTE:  This needs to occur AFTER the PYTHON_AUX_PATHS has been modified
config.enable_debug_mode()  # NOTE:  This needs to occur AFTER the PYTHON_AUX_PATHS has been modified
#config.enable_provenance()

##############################################################################
from nipype.interfaces.base import CommandLine, CommandLineInputSpec, TraitedSpec, File, Directory
from nipype.interfaces.base import traits, isdefined, BaseInterface
from nipype.interfaces.utility import Merge, Split, Function, Rename, IdentityInterface
import nipype.interfaces.io as nio   # Data i/o
import nipype.pipeline.engine as pe  # pypeline engine
from nipype.interfaces.freesurfer import ReconAll

from nipype.utils.misc import package_check
# package_check('nipype', '5.4', 'tutorial1') ## HACK: Check nipype version
package_check('numpy', '1.3', 'tutorial1')
package_check('scipy', '0.7', 'tutorial1')
package_check('networkx', '1.0', 'tutorial1')
package_check('IPython', '0.10', 'tutorial1')

import os
## Check to ensure that SimpleITK can be found
#import SimpleITK as sitk
Esempio n. 53
0
def run_workflow(csv_file, use_pbs, contrasts_name, template):
    workflow = pe.Workflow(name='run_level1flow')
    workflow.base_dir = os.path.abspath('./workingdirs')

    from nipype import config, logging
    config.update_config({
        'logging': {
            'log_directory': os.path.join(workflow.base_dir, 'logs'),
            'log_to_file': True,
            'workflow_level': 'DEBUG',
            'interface_level': 'DEBUG',
        }
    })
    logging.update_logging(config)

    config.enable_debug_mode()

    # redundant with enable_debug_mode() ...
    workflow.stop_on_first_crash = True
    workflow.remove_unnecessary_outputs = False
    workflow.keep_inputs = True
    workflow.hash_method = 'content'
    """
    Setup the contrast structure that needs to be evaluated. This is a list of
    lists. The inner list specifies the contrasts and has the following format:
    [Name,Stat,[list of condition names],[weights on those conditions]. The
    condition names must match the `names` listed in the `evt_info` function
    described above.
    """

    try:
        import importlib
        mod = importlib.import_module('contrasts.' + contrasts_name)
        contrasts = mod.contrasts
        # event_names = mod.event_names
    except ImportError:
        raise RuntimeError('Unknown contrasts: %s. Must exist as a Python'
                           ' module in contrasts directory!' % contrasts_name)
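    # Hypothetical illustration of the contrast structure described above
    # (condition names are made up, not taken from this experiment):
    #   contrasts = [
    #       ['task>rest', 'T', ['task', 'rest'], [1, -1]],
    #       ['rest>task', 'T', ['task', 'rest'], [-1, 1]],
    #   ]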

    modelfit = create_workflow(contrasts)

    import bids_templates as bt

    inputnode = pe.Node(niu.IdentityInterface(fields=[
        'subject_id',
        'session_id',
        'run_id',
    ]),
                        name='input')

    assert csv_file is not None, "--csv argument must be defined!"

    reader = niu.CSVReader()
    reader.inputs.header = True
    reader.inputs.in_file = csv_file
    out = reader.run()
    subject_list = out.outputs.subject
    session_list = out.outputs.session
    run_list = out.outputs.run

    inputnode.iterables = [
        ('subject_id', subject_list),
        ('session_id', session_list),
        ('run_id', run_list),
    ]
    inputnode.synchronize = True

    templates = {
        'funcs':
        'derivatives/featpreproc/highpassed_files/sub-{subject_id}/ses-{session_id}/func/'
        'sub-{subject_id}_ses-{session_id}_*_run-{run_id}*_bold_res-1x1x1_preproc_*.nii.gz',

        # 'funcmasks':
        # 'featpreproc/func_unwarp/sub-{subject_id}/ses-{session_id}/func/'
        #     'sub-{subject_id}_ses-{session_id}_*_run-{run_id}*_bold_res-1x1x1_preproc'
        #     '_mc_unwarped.nii.gz',
        'highpass':
        '******'
        'sub-{subject_id}_ses-{session_id}_*_run-{run_id}_bold_res-1x1x1_preproc_*.nii.gz',
        'motion_parameters':
        'derivatives/featpreproc/motion_corrected/sub-{subject_id}/ses-{session_id}/func/'
        'sub-{subject_id}_ses-{session_id}_*_run-{run_id}_bold_res-1x1x1_preproc.param.1D',
        'motion_outlier_files':
        'derivatives/featpreproc/motion_outliers/sub-{subject_id}/ses-{session_id}/func/'
        'art.sub-{subject_id}_ses-{session_id}_*_run-{run_id}_bold_res-1x1x1_preproc_mc'
        '_maths_outliers.txt',
        'event_log':
        'sub-{subject_id}/ses-{session_id}/func/'
        # 'sub-{subject_id}_ses-{session_id}*_bold_res-1x1x1_preproc'
        'sub-{subject_id}_ses-{session_id}*run-{run_id}*'
        # '.nii.gz',
        '_events.tsv',
        'ref_func':
        'derivatives/featpreproc/reference/func/*.nii.gz',
        'ref_funcmask':
        'derivatives/featpreproc/reference/func_mask/*.nii.gz',
    }

    inputfiles = pe.Node(nio.SelectFiles(templates, base_directory=data_dir),
                         name='in_files')
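    # NOTE: data_dir (the dataset root used by the SelectFiles templates above) is
    # assumed to be defined at module level; it is not set inside this function.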

    workflow.connect([
        (inputnode, inputfiles, [
            ('subject_id', 'subject_id'),
            ('session_id', 'session_id'),
            ('run_id', 'run_id'),
        ]),
    ])

    join_input = pe.JoinNode(
        niu.IdentityInterface(fields=[
            # 'subject_id',
            # 'session_id',
            # 'run_id',
            'funcs',
            'highpass',
            'motion_parameters',
            'motion_outlier_files',
            'event_log',
            'ref_func',
            'ref_funcmask',
        ]),
        joinsource='input',
        joinfield=[
            'funcs',
            'highpass',
            'motion_parameters',
            'motion_outlier_files',
            'event_log',
        ],
        # unique=True,
        name='join_input')
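    # The JoinNode gathers, for every field listed in joinfield, the outputs from
    # all (subject, session, run) iterations of the 'input' node into lists, so the
    # model fit receives the complete set of runs at once.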

    workflow.connect([
        (inputfiles, join_input, [
            ('funcs', 'funcs'),
            ('highpass', 'highpass'),
            ('motion_parameters', 'motion_parameters'),
            ('motion_outlier_files', 'motion_outlier_files'),
            ('event_log', 'event_log'),
            ('ref_func', 'ref_func'),
            ('ref_funcmask', 'ref_funcmask'),
        ]),
        (join_input, modelfit, [
            ('funcs', 'inputspec.funcs'),
            ('highpass', 'inputspec.highpass'),
            ('motion_parameters', 'inputspec.motion_parameters'),
            ('motion_outlier_files', 'inputspec.motion_outlier_files'),
            ('event_log', 'inputspec.event_log'),
            ('ref_func', 'inputspec.ref_func'),
            ('ref_funcmask', 'inputspec.ref_funcmask'),
        ]),
    ])

    modelfit.inputs.inputspec.fwhm = 2.0
    modelfit.inputs.inputspec.highpass = 50
    modelfit.write_graph(simple_form=True)
    modelfit.write_graph(graph2use='orig', format='png', simple_form=True)
    # modelfit.write_graph(graph2use='detailed', format='png', simple_form=False)

    workflow.stop_on_first_crash = True
    workflow.keep_inputs = True
    workflow.remove_unnecessary_outputs = False
    workflow.write_graph(simple_form=True)
    workflow.write_graph(graph2use='colored', format='png', simple_form=True)
    # workflow.write_graph(graph2use='detailed', format='png', simple_form=False)
    if use_pbs:
        workflow.run(plugin='PBS',
                     plugin_args={'template': os.path.expanduser(template)})
    else:
        workflow.run()
Esempio n. 54
0
#!/usr/bin/env python3

# http://nipype.readthedocs.io/en/latest/users/examples/fmri_fsl.html
# http://miykael.github.io/nipype-beginner-s-guide/firstSteps.html#input-output-stream
import os                                    # system functions

import nipype.interfaces.io as nio           # Data i/o
import nipype.interfaces.fsl as fsl          # fsl
import nipype.interfaces.afni as afni        # afni
from nipype.interfaces.utility import IdentityInterface

from nipype.pipeline.engine import Workflow, Node, MapNode

from nipype import config
config.enable_debug_mode()

from subcode.afni_allin_slices import AFNIAllinSlices

# the create_workflow_allin_slices workflow is the only one that's used
def create_workflow_allin_slices(name='motion_correction', iterfield=['in_file']):
    workflow = Workflow(name=name)
    inputs = Node(IdentityInterface(fields=[
        'subject_id',
        'session_id',

        'ref_func', 
        'ref_func_weights',

        'funcs',
        'funcs_masks',
Esempio n. 55
0
def main(argv=None):
    import argparse
    import ConfigParser
    import csv
    import string
    import os
    import sys

    if argv is None:
        argv = sys.argv

    # Create and parse input arguments
    parser = argparse.ArgumentParser(description='Runs a mini version of BRAINSAutoWorkup')
    group = parser.add_argument_group('Required')
    group.add_argument('-pe', action="store", dest='processingEnvironment', required=True,
                       help='The name of the processing environment to use from the config file')
    group.add_argument('-wfrun', action="store", dest='wfrun', required=True,
                       help='The name of the workflow running plugin to use')
    group.add_argument('-subject', action="store", dest='subject', required=True,
                       help='The name of the subject to process')
    group.add_argument('-ExperimentConfig', action="store", dest='ExperimentConfig', required=True,
                       help='The path to the file that describes the entire experiment')
    parser.add_argument('-doshort', action='store', dest='doshort', default=False, help='If not present, do long')
    parser.add_argument('-rewrite_datasinks', action='store_true', default=False,
                        help='Use if the datasinks should be forced rerun.\nDefault: value in configuration file')
    parser.add_argument('--version', action='version', version='%(prog)s 1.0')
    input_arguments = parser.parse_args()

    expConfig = ConfigParser.ConfigParser()
    expConfig.read(input_arguments.ExperimentConfig)

    # Pipeline-specific information
    GLOBAL_DATA_SINK_REWRITE_FROM_CONFIG = expConfig.getboolean('PIPELINE', 'GLOBAL_DATA_SINK_REWRITE')
    GLOBAL_DATA_SINK_REWRITE=setDataSinkRewriteValue(input_arguments.rewrite_datasinks, GLOBAL_DATA_SINK_REWRITE_FROM_CONFIG)

    # Experiment specific information
    subject_data_file = expConfig.get('EXPERIMENT_DATA', 'SESSION_DB')
    ExperimentName = expConfig.get('EXPERIMENT_DATA', 'EXPERIMENTNAME')
    WORKFLOW_COMPONENTS_STRING = expConfig.get('EXPERIMENT_DATA', 'WORKFLOW_COMPONENTS')
    WORKFLOW_COMPONENTS = eval(WORKFLOW_COMPONENTS_STRING)

    # Platform specific information
    #     Prepend the python search paths
    PYTHON_AUX_PATHS = expConfig.get(input_arguments.processingEnvironment, 'PYTHON_AUX_PATHS')
    PYTHON_AUX_PATHS = PYTHON_AUX_PATHS.split(':')
    PYTHON_AUX_PATHS.extend(sys.path)
    sys.path = PYTHON_AUX_PATHS
    ######################################################################################
    ###### Now ensure that all the required packages can be read in from this custom path
    #\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/
    # print sys.path
    from nipype import config  # NOTE:  This needs to occur AFTER the PYTHON_AUX_PATHS has been modified
    config.enable_debug_mode()  # NOTE:  This needs to occur AFTER the PYTHON_AUX_PATHS has been modified
    ##############################################################################
    from nipype.interfaces.base import CommandLine, CommandLineInputSpec, TraitedSpec, File, Directory
    from nipype.interfaces.base import traits, isdefined, BaseInterface
    from nipype.interfaces.utility import Merge, Split, Function, Rename, IdentityInterface
    import nipype.interfaces.io as nio   # Data i/o
    import nipype.pipeline.engine as pe  # pypeline engine
    from nipype.interfaces.freesurfer import ReconAll

    from nipype.utils.misc import package_check
    # package_check('nipype', '5.4', 'tutorial1') ## HACK: Check nipype version
    package_check('numpy', '1.3', 'tutorial1')
    package_check('scipy', '0.7', 'tutorial1')
    package_check('networkx', '1.0', 'tutorial1')
    package_check('IPython', '0.10', 'tutorial1')

    ## Check to ensure that SimpleITK can be found
    import SimpleITK as sitk
    #\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/
    #####################################################################################
    #  FreeSurfer is extraordinarly finicky and is easily confused and incorrect.
    #  Force that all the FREESURFER env vars are set in subsequent scripts by
    #  ensuring that rough versions of these environmental variables are not
    #  set internal to this script.
    prohibited_env_var_exists = False
    for ENVVAR_TO_CHECK in ['FREESURFER_HOME', 'FSFAST_HOME', 'FSF_OUTPUT_FORMAT', 'SUBJECTS_DIR', 'MNI_DIR', 'FSL_DIR']:
        if ENVVAR_TO_CHECK in os.environ:
            prohibited_env_var_exists = True
            print("ERROR: Environmental Variable {0}={1} exists.  Please unset before continuing.".format(ENVVAR_TO_CHECK, os.environ[ENVVAR_TO_CHECK]))
    if prohibited_env_var_exists:
        sys.exit(-1)

    #\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/
    #####################################################################################
    #     Prepend the shell environment search paths
    PROGRAM_PATHS = expConfig.get(input_arguments.processingEnvironment, 'PROGRAM_PATHS')
    PROGRAM_PATHS = PROGRAM_PATHS.split(':')
    PROGRAM_PATHS.extend(os.environ['PATH'].split(':'))
    os.environ['PATH'] = ':'.join(PROGRAM_PATHS)
    #    Define platform specific output write paths
    mountPrefix = expConfig.get(input_arguments.processingEnvironment, 'MOUNTPREFIX')
    BASEOUTPUTDIR = expConfig.get(input_arguments.processingEnvironment, 'BASEOUTPUTDIR')
    ExperimentBaseDirectoryPrefix = os.path.realpath(os.path.join(BASEOUTPUTDIR, ExperimentName))
    ExperimentBaseDirectoryCache = ExperimentBaseDirectoryPrefix + "_CACHE"
    ExperimentBaseDirectoryResults = ExperimentBaseDirectoryPrefix + "_Results"
    if not os.path.exists(ExperimentBaseDirectoryCache):
        os.makedirs(ExperimentBaseDirectoryCache)
    if not os.path.exists(ExperimentBaseDirectoryResults):
        os.makedirs(ExperimentBaseDirectoryResults)
    #    Define workup common reference data sets
    #    The ATLAS needs to be copied to the ExperimentBaseDirectoryPrefix
    #    The ATLAS pathing must stay constant
    ATLASPATH = expConfig.get(input_arguments.processingEnvironment, 'ATLASPATH')
    if not os.path.exists(ATLASPATH):
        print("ERROR:  Invalid Path for Atlas: {0}".format(ATLASPATH))
        sys.exit(-1)
    CACHE_ATLASPATH = os.path.realpath(os.path.join(ExperimentBaseDirectoryCache, 'Atlas'))
    from distutils.dir_util import copy_tree
    if not os.path.exists(CACHE_ATLASPATH):
        print("Copying a reference of the atlas to the experiment cache directory:\n    from: {0}\n    to: {1}".format(ATLASPATH, CACHE_ATLASPATH))
        copy_tree(ATLASPATH, CACHE_ATLASPATH, preserve_mode=1, preserve_times=1)
        ## Now generate the xml file with the correct pathing
        file_replace(os.path.join(ATLASPATH, 'ExtendedAtlasDefinition.xml.in'), os.path.join(CACHE_ATLASPATH, 'ExtendedAtlasDefinition.xml'), "@ATLAS_DIRECTORY@", CACHE_ATLASPATH)
    else:
        print("Atlas already exists in experiment cache directory: {0}".format(CACHE_ATLASPATH))
    #  Just to be safe, copy the model file as well
    BCDMODELPATH = expConfig.get(input_arguments.processingEnvironment, 'BCDMODELPATH')
    CACHE_BCDMODELPATH = os.path.join(ExperimentBaseDirectoryCache, os.path.basename(BCDMODELPATH))
    from distutils.file_util import copy_file
    for BCDModelFile in ['LLSModel-2ndVersion.h5', 'T1-2ndVersion.mdl']:
        if BCDModelFile[-2:] == 'h5':
            BCDModelFile = os.path.join('Transforms_h5', BCDModelFile)
        orig = os.path.join(BCDMODELPATH, BCDModelFile)
        new = os.path.join(CACHE_BCDMODELPATH, BCDModelFile)
        new = new.replace('Transforms_h5/', '')  # Flatten back out, even if you needed to get files from subdirectory.
        if not os.path.exists(CACHE_BCDMODELPATH):
            os.mkdir(CACHE_BCDMODELPATH)
        if not os.path.exists(new):
            print("Copying BCD Model file to cache directory: {0}".format(new))
            copy_file(orig, new, preserve_mode=1, preserve_times=1)
        else:
            print("BCD Model exists in cache directory: {0}".format(new))

    CUSTOM_ENVIRONMENT = expConfig.get(input_arguments.processingEnvironment, 'CUSTOM_ENVIRONMENT')
    CUSTOM_ENVIRONMENT = eval(CUSTOM_ENVIRONMENT)
    ## Set custom environmental variables so that subproceses work properly (i.e. for FreeSurfer)
    # print CUSTOM_ENVIRONMENT
    for key, value in CUSTOM_ENVIRONMENT.items():
        # print "SETTING: ", key, value
        os.putenv(key, value)
        os.environ[key] = value
    # print os.environ
    # sys.exit(-1)

    ## If freesurfer is requested, then ensure that a sane environment is available
    if 'FREESURFER' in WORKFLOW_COMPONENTS:
        print "FREESURFER NEEDS TO CHECK FOR SANE ENVIRONMENT HERE."

    CLUSTER_QUEUE = expConfig.get(input_arguments.processingEnvironment, 'CLUSTER_QUEUE')
    CLUSTER_QUEUE_LONG = expConfig.get(input_arguments.processingEnvironment, 'CLUSTER_QUEUE_LONG')

    ## Setup environment for CPU load balancing of ITK based programs.
    import multiprocessing
    total_CPUS = multiprocessing.cpu_count()
    if input_arguments.wfrun == 'helium_all.q':
        pass
    elif input_arguments.wfrun == 'helium_all.q_graph':
        pass
    elif input_arguments.wfrun == 'ipl_OSX':
        pass
    elif input_arguments.wfrun == 'local_4':
        os.environ['NSLOTS'] = "{0}".format(total_CPUS / 4)
    elif input_arguments.wfrun == 'local_12':
        os.environ['NSLOTS'] = "{0}".format(total_CPUS / 12)
    elif input_arguments.wfrun == 'local':
        os.environ['NSLOTS'] = "{0}".format(total_CPUS / 1)
    elif input_arguments.wfrun == 'ds_runner':
        os.environ['NSLOTS'] = "{0}".format(total_CPUS / 1)
    else:
        print "FAILED RUN: You must specify the run environment type. [helium_all.q,helium_all.q_graph,ipl_OSX,local_4,local_12,local,ds_runner]"
        print input_arguments.wfrun
        sys.exit(-1)

    print "Configuring Pipeline"
    import SessionDB
    subjectDatabaseFile = os.path.join(ExperimentBaseDirectoryCache, 'InternalWorkflowSubjectDB.db')
    subject_list = input_arguments.subject.split(',')
    ## TODO:  Only make DB if db is older than subject_data_file.
    if (not os.path.exists(subjectDatabaseFile)) or (os.path.getmtime(subjectDatabaseFile) < os.path.getmtime(subject_data_file)):
        ExperimentDatabase = SessionDB.SessionDB(subjectDatabaseFile, subject_list)
        ExperimentDatabase.MakeNewDB(subject_data_file, mountPrefix)
        ExperimentDatabase = None
        ExperimentDatabase = SessionDB.SessionDB(subjectDatabaseFile, subject_list)
    else:
        print("Using cached database, {0}".format(subjectDatabaseFile))
        ExperimentDatabase = SessionDB.SessionDB(subjectDatabaseFile, subject_list)
    print "ENTIRE DB for {_subjid}: ".format(_subjid=ExperimentDatabase.getSubjectFilter())
    print "^^^^^^^^^^^^^"
    for row in ExperimentDatabase.getEverything():
        print row
    print "^^^^^^^^^^^^^"

    ## Create the shell wrapper script for ensuring that all jobs running on remote hosts from SGE
    #  have the same environment as the job submission host.
    JOB_SCRIPT = get_global_sge_script(sys.path, PROGRAM_PATHS, CUSTOM_ENVIRONMENT)
    print JOB_SCRIPT

    import WorkupT1T2  # NOTE:  This needs to occur AFTER the PYTHON_AUX_PATHS has been modified
    print "TESTER"
    import ShortWorkupT1T2
    for subjectid in ExperimentDatabase.getAllSubjects():
        if input_arguments.doshort:
            baw200 = ShortWorkupT1T2.ShortWorkupT1T2(subjectid, mountPrefix,
                                                     os.path.join(ExperimentBaseDirectoryCache, str(subjectid)),
                                                     ExperimentBaseDirectoryResults,
                                                     ExperimentDatabase,
                                                     CACHE_ATLASPATH,
                                                     CACHE_BCDMODELPATH,
                                                     GLOBAL_DATA_SINK_REWRITE,
                                                     WORKFLOW_COMPONENTS=WORKFLOW_COMPONENTS, CLUSTER_QUEUE=CLUSTER_QUEUE, CLUSTER_QUEUE_LONG=CLUSTER_QUEUE_LONG)
        else:
            baw200 = WorkupT1T2.WorkupT1T2(subjectid, mountPrefix,
                                           os.path.join(ExperimentBaseDirectoryCache, str(subjectid)),
                                           ExperimentBaseDirectoryResults,
                                           ExperimentDatabase,
                                           CACHE_ATLASPATH,
                                           CACHE_BCDMODELPATH,
                                           GLOBAL_DATA_SINK_REWRITE,
                                           WORKFLOW_COMPONENTS=WORKFLOW_COMPONENTS, CLUSTER_QUEUE=CLUSTER_QUEUE, CLUSTER_QUEUE_LONG=CLUSTER_QUEUE_LONG, SGE_JOB_SCRIPT=JOB_SCRIPT)
        print "Start Processing"

        SGEFlavor = 'SGE'
        try:
            if input_arguments.wfrun == 'helium_all.q':
                try:
                    baw200.write_graph()
                except:
                    pass
                baw200.run(plugin=SGEFlavor,
                           plugin_args=dict(template=JOB_SCRIPT, qsub_args="-S /bin/bash -cwd -pe smp1 1-12 -l h_vmem=19G,mem_free=9G -o /dev/null -e /dev/null " + CLUSTER_QUEUE))
            elif input_arguments.wfrun == 'helium_all.q_graph':
                try:
                    baw200.write_graph()
                except:
                    pass
                SGEFlavor = 'SGEGraph'  # Use the SGEGraph processing
                baw200.run(plugin=SGEFlavor,
                           plugin_args=dict(template=JOB_SCRIPT, qsub_args="-S /bin/bash -cwd -pe smp1 1-12 -l h_vmem=19G,mem_free=9G -o /dev/null -e /dev/null " + CLUSTER_QUEUE))
            elif input_arguments.wfrun == 'ipl_OSX':
                try:
                    baw200.write_graph()
                except:
                    pass
                print "Running On ipl_OSX"
                baw200.run(plugin=SGEFlavor,
                           plugin_args=dict(template=JOB_SCRIPT, qsub_args="-S /bin/bash -cwd -pe smp1 1-12 -l h_vmem=19G,mem_free=9G -o /dev/null -e /dev/null " + CLUSTER_QUEUE))
            elif input_arguments.wfrun == 'local_4':
                try:
                    baw200.write_graph()
                except:
                    pass
                print "Running with 4 parallel processes on local machine"
                baw200.run(plugin='MultiProc', plugin_args={'n_procs': 4})
            elif input_arguments.wfrun == 'local_12':
                try:
                    baw200.write_graph()
                except:
                    pass
                print "Running with 12 parallel processes on local machine"
                baw200.run(plugin='MultiProc', plugin_args={'n_procs': 12})
            elif input_arguments.wfrun == 'ds_runner':
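                # Minimal ad-hoc runner: walks the expanded execution graph and
                # executes only the DataSink-style nodes (names containing '_ds').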
                class ds_runner(object):
                    def run(self, graph, **kwargs):
                        for node in graph.nodes():
                            if '_ds' in node.name.lower():
                                node.run()
                baw200.run(plugin=ds_runner())
            elif input_arguments.wfrun == 'local':
                try:
                    baw200.write_graph()
                except:
                    pass
                print "Running sequentially on local machine"
                # baw200.run(updatehash=True)
                baw200.run()
            else:
                print "You must specify the run environment type. [helium_all.q,helium_all.q_graph,ipl_OSX,local_4,local_12,local]"
                print input_arguments.wfrun
                sys.exit(-1)
        except Exception, err:
            print("ERROR: EXCEPTION CAUGHT IN RUNNING SUBJECT {0}".format(subjectid))
            raise err
# Literal translation of the Perl script https://github.com/andrewjanke/volgenmodel
# to Python and using Nipype interfaces where possible.
#
# Currently this code only runs on a single core as it does not take advantage
# of Nipype's workflow functionality. Future versions will support this.

# Author: Carlo Hamalainen <*****@*****.**>

from nipype import config
config.enable_debug_mode()

import os
import os.path
import subprocess
# import nipype.pipeline.engine as pe
# import nipype.interfaces.io as nio
# import nipype.interfaces.utility as utils

from nipypeminc import  \
        Volcentre,      \
        Norm,           \
        Volpad,         \
        Voliso,         \
        Math,           \
        Pik,            \
        Blur,           \
        Gennlxfm,       \
        XfmConcat,      \
        BestLinReg,     \
        NlpFit,         \
        XfmAvg,         \
Esempio n. 57
0
def build_collect_workflow(args, retval):
    import os
    import glob
    import warnings
    warnings.filterwarnings("ignore")
    import ast
    import pkg_resources
    from pathlib import Path
    import yaml
    import uuid
    from time import strftime
    import shutil

    try:
        import pynets

        print(f"\n\nPyNets Version:\n{pynets.__version__}\n\n")
    except ImportError:
        print("PyNets not installed! Ensure that you are using the correct"
              " python version.")

    # Set Arguments to global variables
    resources = args.pm
    if resources == "auto":
        from multiprocessing import cpu_count
        import psutil
        nthreads = cpu_count() - 1
        procmem = [
            int(nthreads),
            int(list(psutil.virtual_memory())[4] / 1000000000)
        ]
    else:
        procmem = list(eval(str(resources)))
    plugin_type = args.plug
    if isinstance(plugin_type, list):
        plugin_type = plugin_type[0]
    verbose = args.v
    working_path = args.basedir
    work_dir = args.work
    modality = args.modality
    drop_cols = args.dc
    if isinstance(modality, list):
        modality = modality[0]

    if os.path.isdir(work_dir):
        shutil.rmtree(work_dir)

    os.makedirs(f"{str(Path(working_path))}/{modality}_group_topology_auc",
                exist_ok=True)

    wf = collect_all(working_path, modality, drop_cols)

    with open(pkg_resources.resource_filename("pynets", "runconfig.yaml"),
              "r") as stream:
        try:
            hardcoded_params = yaml.load(stream, Loader=yaml.FullLoader)
            runtime_dict = {}
            execution_dict = {}
            for i in range(len(hardcoded_params["resource_dict"])):
                runtime_dict[list(hardcoded_params["resource_dict"][i].keys(
                ))[0]] = ast.literal_eval(
                    list(hardcoded_params["resource_dict"][i].values())[0][0])
            for i in range(len(hardcoded_params["execution_dict"])):
                execution_dict[list(
                    hardcoded_params["execution_dict"][i].keys())[0]] = list(
                        hardcoded_params["execution_dict"][i].values())[0][0]
        except yaml.YAMLError:
            print("Failed to parse runconfig.yaml")
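    # Illustrative shape of runconfig.yaml as implied by the parsing above (lists
    # of single-key mappings whose values are one-element lists; not the actual
    # file contents):
    #   resource_dict:
    #     - some_node_name: ["(1, 1)"]
    #   execution_dict:
    #     - stop_on_first_crash: ["True"]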

    run_uuid = f"{strftime('%Y%m%d_%H%M%S')}_{uuid.uuid4()}"
    os.makedirs(f"{work_dir}/pynets_out_collection{run_uuid}", exist_ok=True)
    wf.base_dir = f"{work_dir}/pynets_out_collection{run_uuid}"

    if verbose is True:
        from nipype import config, logging

        cfg_v = dict(
            logging={
                "workflow_level": "DEBUG",
                "utils_level": "DEBUG",
                "interface_level": "DEBUG",
                "filemanip_level": "DEBUG",
                "log_directory": str(wf.base_dir),
                "log_to_file": True,
            },
            monitoring={
                "enabled": True,
                "sample_frequency": "0.1",
                "summary_append": True,
                "summary_file": str(wf.base_dir),
            },
        )
        config.update_config(cfg_v)
        logging.update_logging(config)
        config.enable_debug_mode()
        config.enable_resource_monitor()

        import logging

        callback_log_path = f"{wf.base_dir}{'/run_stats.log'}"
        logger = logging.getLogger("callback")
        logger.setLevel(logging.DEBUG)
        handler = logging.FileHandler(callback_log_path)
        logger.addHandler(handler)

    execution_dict["crashdump_dir"] = str(wf.base_dir)
    execution_dict["plugin"] = str(plugin_type)
    cfg = dict(execution=execution_dict)
    for key in cfg.keys():
        for setting, value in cfg[key].items():
            wf.config[key][setting] = value
    try:
        wf.write_graph(graph2use="colored", format="png")
    except BaseException:
        pass
    if verbose is True:
        from nipype.utils.profiler import log_nodes_cb

        plugin_args = {
            "n_procs": int(procmem[0]),
            "memory_gb": int(procmem[1]),
            "status_callback": log_nodes_cb,
            "scheduler": "mem_thread",
        }
    else:
        plugin_args = {
            "n_procs": int(procmem[0]),
            "memory_gb": int(procmem[1]),
            "scheduler": "mem_thread",
        }
    print("%s%s%s" % ("\nRunning with ", str(plugin_args), "\n"))
    wf.run(plugin=plugin_type, plugin_args=plugin_args)
    if verbose is True:
        from nipype.utils.draw_gantt_chart import generate_gantt_chart

        print("Plotting resource profile from run...")
        generate_gantt_chart(callback_log_path, cores=int(procmem[0]))
        handler.close()
        logger.removeHandler(handler)
    return