Example #1
def nlmeans_proxy(in_file, settings, noise_mask=None, out_file=None):
    """
    Uses non-local means to denoise 4D datasets
    """
    package_check('dipy', version='0.8.0.dev')
    from dipy.denoise.nlmeans import nlmeans

    if out_file is None:
        fname, fext = op.splitext(op.basename(in_file))
        if fext == '.gz':
            fname, fext2 = op.splitext(fname)
            fext = fext2 + fext
        out_file = op.abspath('./%s_denoise%s' % (fname, fext))

    img = nb.load(in_file)
    hdr = img.get_header()
    data = img.get_data()
    aff = img.get_affine()

    nmask = data[..., 0] > 80
    if noise_mask is not None:
        nmask = noise_mask > 0

    sigma = np.std(data[nmask == 1])
    den = nlmeans(data, sigma, **settings)

    nb.Nifti1Image(den.astype(hdr.get_data_dtype()), aff,
                   hdr).to_filename(out_file)
    return out_file, sigma
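
Usage note: nlmeans_proxy forwards the settings dict unchanged to dipy's nlmeans via **settings. The sketch below is illustrative only; the file name dwi.nii.gz is hypothetical, and the settings keys assume the keyword arguments of dipy.denoise.nlmeans.nlmeans in the 0.8.x series (patch_radius, block_radius, rician).

# Hypothetical call, assuming the module-level imports used above
# (numpy as np, nibabel as nb, os.path as op, package_check) are in place.
settings = {
    'patch_radius': 1,   # key names assumed to match the installed dipy
    'block_radius': 5,
    'rician': True,      # Rician noise model, typical for magnitude MR data
}
denoised_file, sigma = nlmeans_proxy('dwi.nii.gz', settings)
print('Wrote %s (estimated noise sigma %g)' % (denoised_file, sigma))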
Example #2
def nlmeans_proxy(in_file, settings,
                  noise_mask=None, out_file=None):
    """
    Uses non-local means to denoise 4D datasets
    """
    package_check('dipy', version='0.8.0.dev')
    from dipy.denoise.nlmeans import nlmeans

    if out_file is None:
        fname, fext = op.splitext(op.basename(in_file))
        if fext == '.gz':
            fname, fext2 = op.splitext(fname)
            fext = fext2 + fext
        out_file = op.abspath('./%s_denoise%s' % (fname, fext))

    img = nb.load(in_file)
    hdr = img.get_header()
    data = img.get_data()
    aff = img.get_affine()

    nmask = data[..., 0] > 80
    if noise_mask is not None:
        nmask = noise_mask > 0

    sigma = np.std(data[nmask == 1])
    den = nlmeans(data, sigma, **settings)

    nb.Nifti1Image(den.astype(hdr.get_data_dtype()), aff,
                   hdr).to_filename(out_file)
    return out_file, sigma
Example #3
File: utils.py  Project: schwarty/nipype
def skip_if_no_package(*args, **kwargs):
    """Raise SkipTest if package_check fails

    Parameters
    ----------
    *args Positional parameters passed to `package_check`
    *kwargs Keyword parameters passed to `package_check`
    """
    package_check(exc_failed_import=SkipTest, exc_failed_check=SkipTest, *args, **kwargs)
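
For context: because both a failed import and a failed version check are mapped to SkipTest, a test that needs an optional package can bail out as a skip rather than an error. A minimal sketch of that pattern; the test body and the networkx 1.3 requirement are made up for illustration, and skip_if_no_package is assumed to be importable (e.g. from the module above).

def test_graph_roundtrip():
    # Reported as skipped, not failed, when networkx is missing or older
    # than 1.3, because skip_if_no_package raises SkipTest in either case.
    skip_if_no_package('networkx', '1.3')
    import networkx as nx
    g = nx.Graph()
    g.add_edge('a', 'b')
    assert g.number_of_edges() == 1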
Example #4
def verify_packages(application='AutoWorkup'):
    package_version = [
        ('nipype', '0.9'),
        ('numpy', '1.8'),
        ('scipy', '0.13'),
        ('networkx', '1.8'),
        # ('IPython', '1.2'),
        # ('SimpleITK', '0.7')
    ]
    for item in package_version:
        package_check(*item, app=application)
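
With nipype's default arguments, package_check raises ImportError when a package cannot be imported and RuntimeError when the installed version is older than the one requested, so verify_packages can act as a startup gate. A minimal sketch under that assumption; the error handling shown here is illustrative, not part of AutoWorkup.

import sys

try:
    verify_packages(application='AutoWorkup')
except (ImportError, RuntimeError) as err:
    # A dependency is missing or too old; report it and stop early.
    sys.stderr.write('Dependency check failed: %s\n' % err)
    sys.exit(1)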
Example #5
def verify_packages(application='AutoWorkup'):
    package_version = [
        ('nipype', '0.9'),
        ('numpy', '1.8'),
        ('scipy', '0.13'),
        ('networkx', '1.8'),
        # ('IPython', '1.2'),
        # ('SimpleITK', '0.7')
        ]
    for item in package_version:
        package_check(*item, app=application)
Example #6
def verify_packages(application="AutoWorkup"):
    """
    This function...

    :param application:
    :return:
    """
    package_version = [
        ("nipype", "0.9"),
        ("numpy", "1.8"),
        ("scipy", "0.13"),
        ("networkx", "1.8"),
        # ('IPython', '1.2'),
        # ('SimpleITK', '0.7')
    ]
    for item in package_version:
        package_check(*item, app=application)
Example #7
def main(argv=None):
    import argparse
    import ConfigParser
    import csv
    import string

    if argv == None:
        argv = sys.argv

    # Create and parse input arguments
    parser = argparse.ArgumentParser(
        description='Runs a mini version of BRAINSAutoWorkup')
    group = parser.add_argument_group('Required')
    group.add_argument(
        '-pe',
        action="store",
        dest='processingEnvironment',
        required=True,
        help=
        'The name of the processing environment to use from the config file')
    group.add_argument('-wfrun',
                       action="store",
                       dest='wfrun',
                       required=True,
                       help='The name of the workflow running plugin to use')
    group.add_argument('-subject',
                       action="store",
                       dest='subject',
                       required=True,
                       help='The name of the subject to process')
    group.add_argument(
        '-ExperimentConfig',
        action="store",
        dest='ExperimentConfig',
        required=True,
        help='The path to the file that describes the entire experiment')
    parser.add_argument('-doshort',
                        action='store',
                        dest='doshort',
                        default=False,
                        help='If not present, do long')
    parser.add_argument(
        '-rewrite_datasinks',
        action='store_true',
        default=False,
        help=
        'Use if the datasinks should be forced rerun.\nDefault: value in configuration file'
    )
    parser.add_argument('--version', action='version', version='%(prog)s 1.0')
    input_arguments = parser.parse_args()

    expConfig = ConfigParser.ConfigParser()
    expConfig.read(input_arguments.ExperimentConfig)

    # Pipeline-specific information
    GLOBAL_DATA_SINK_REWRITE_FROM_CONFIG = expConfig.getboolean(
        'PIPELINE', 'GLOBAL_DATA_SINK_REWRITE')
    GLOBAL_DATA_SINK_REWRITE = setDataSinkRewriteValue(
        input_arguments.rewrite_datasinks,
        GLOBAL_DATA_SINK_REWRITE_FROM_CONFIG)

    # Experiment specific information
    subject_data_file = expConfig.get('EXPERIMENT_DATA', 'SESSION_DB')
    ExperimentName = expConfig.get('EXPERIMENT_DATA', 'EXPERIMENTNAME')
    WORKFLOW_COMPONENTS_STRING = expConfig.get('EXPERIMENT_DATA',
                                               'WORKFLOW_COMPONENTS')
    WORKFLOW_COMPONENTS = eval(WORKFLOW_COMPONENTS_STRING)

    # Platform specific information
    #     Prepend the python search paths
    PYTHON_AUX_PATHS = expConfig.get(input_arguments.processingEnvironment,
                                     'PYTHON_AUX_PATHS')
    PYTHON_AUX_PATHS = PYTHON_AUX_PATHS.split(':')
    PYTHON_AUX_PATHS.extend(sys.path)
    sys.path = PYTHON_AUX_PATHS
    ######################################################################################
    ###### Now ensure that all the required packages can be read in from this custom path
    #\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/
    # print sys.path
    from nipype import config  # NOTE:  This needs to occur AFTER the PYTHON_AUX_PATHS has been modified
    config.enable_debug_mode(
    )  # NOTE:  This needs to occur AFTER the PYTHON_AUX_PATHS has been modified
    ##############################################################################
    from nipype.interfaces.base import CommandLine, CommandLineInputSpec, TraitedSpec, File, Directory
    from nipype.interfaces.base import traits, isdefined, BaseInterface
    from nipype.interfaces.utility import Merge, Split, Function, Rename, IdentityInterface
    import nipype.interfaces.io as nio  # Data i/o
    import nipype.pipeline.engine as pe  # pypeline engine
    from nipype.interfaces.freesurfer import ReconAll

    from nipype.utils.misc import package_check
    # package_check('nipype', '5.4', 'tutorial1') ## HACK: Check nipype version
    package_check('numpy', '1.3', 'tutorial1')
    package_check('scipy', '0.7', 'tutorial1')
    package_check('networkx', '1.0', 'tutorial1')
    package_check('IPython', '0.10', 'tutorial1')

    ## Check to ensure that SimpleITK can be found
    import SimpleITK as sitk
    #\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/
    #####################################################################################
    #  FreeSurfer is extraordinarily finicky and is easily confused and incorrect.
    #  Force that all the FREESURFER env vars are set in subsequent scripts by
    #  ensuring that rough versions of these environmental variables are not
    #  set internal to this script.
    prohibited_env_var_exists = False
    for ENVVAR_TO_CHECK in [
            'FREESURFER_HOME', 'FSFAST_HOME', 'FSF_OUTPUT_FORMAT',
            'SUBJECTS_DIR', 'MNI_DIR', 'FSL_DIR'
    ]:
        if ENVVAR_TO_CHECK in os.environ:
            prohibited_env_var_exists = True
            print(
                "ERROR: Environmental Variable {0}={1} exists.  Please unset before continuing."
                .format(ENVVAR_TO_CHECK, os.environ[ENVVAR_TO_CHECK]))
    if prohibited_env_var_exists:
        sys.exit(-1)

    #\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/
    #####################################################################################
    #     Prepend the shell environment search paths
    PROGRAM_PATHS = expConfig.get(input_arguments.processingEnvironment,
                                  'PROGRAM_PATHS')
    PROGRAM_PATHS = PROGRAM_PATHS.split(':')
    PROGRAM_PATHS.extend(os.environ['PATH'].split(':'))
    os.environ['PATH'] = ':'.join(PROGRAM_PATHS)
    #    Define platform specific output write paths
    mountPrefix = expConfig.get(input_arguments.processingEnvironment,
                                'MOUNTPREFIX')
    BASEOUTPUTDIR = expConfig.get(input_arguments.processingEnvironment,
                                  'BASEOUTPUTDIR')
    ExperimentBaseDirectoryPrefix = os.path.realpath(
        os.path.join(BASEOUTPUTDIR, ExperimentName))
    ExperimentBaseDirectoryCache = ExperimentBaseDirectoryPrefix + "_CACHE"
    ExperimentBaseDirectoryResults = ExperimentBaseDirectoryPrefix + "_Results"
    if not os.path.exists(ExperimentBaseDirectoryCache):
        os.makedirs(ExperimentBaseDirectoryCache)
    if not os.path.exists(ExperimentBaseDirectoryResults):
        os.makedirs(ExperimentBaseDirectoryResults)
    #    Define workup common reference data sets
    #    The ATLAS needs to be copied to the ExperimentBaseDirectoryPrefix
    #    The ATLAS pathing must stay constant
    ATLASPATH = expConfig.get(input_arguments.processingEnvironment,
                              'ATLASPATH')
    if not os.path.exists(ATLASPATH):
        print("ERROR:  Invalid Path for Atlas: {0}".format(ATLASPATH))
        sys.exit(-1)
    CACHE_ATLASPATH = os.path.realpath(
        os.path.join(ExperimentBaseDirectoryCache, 'Atlas'))
    from distutils.dir_util import copy_tree
    if not os.path.exists(CACHE_ATLASPATH):
        print(
            "Copying a reference of the atlas to the experiment cache directory:\n    from: {0}\n    to: {1}"
            .format(ATLASPATH, CACHE_ATLASPATH))
        copy_tree(ATLASPATH,
                  CACHE_ATLASPATH,
                  preserve_mode=1,
                  preserve_times=1)
        ## Now generate the xml file with the correct pathing
        file_replace(
            os.path.join(ATLASPATH, 'ExtendedAtlasDefinition.xml.in'),
            os.path.join(CACHE_ATLASPATH, 'ExtendedAtlasDefinition.xml'),
            "@ATLAS_DIRECTORY@", CACHE_ATLASPATH)
    else:
        print("Atlas already exists in experiment cache directory: {0}".format(
            CACHE_ATLASPATH))
    #  Just to be safe, copy the model file as well
    BCDMODELPATH = expConfig.get(input_arguments.processingEnvironment,
                                 'BCDMODELPATH')
    CACHE_BCDMODELPATH = os.path.join(ExperimentBaseDirectoryCache,
                                      os.path.basename(BCDMODELPATH))
    from distutils.file_util import copy_file
    for BCDModelFile in ['LLSModel-2ndVersion.h5', 'T1-2ndVersion.mdl']:
        if BCDModelFile[-2:] == 'h5':
            BCDModelFile = os.path.join('Transforms_h5', BCDModelFile)
        orig = os.path.join(BCDMODELPATH, BCDModelFile)
        new = os.path.join(CACHE_BCDMODELPATH, BCDModelFile)
        new = new.replace(
            'Transforms_h5/', ''
        )  # Flatten back out, even if you needed to get files from subdirectory.
        if not os.path.exists(CACHE_BCDMODELPATH):
            os.mkdir(CACHE_BCDMODELPATH)
        if not os.path.exists(new):
            print("Copying BCD Model file to cache directory: {0}".format(new))
            copy_file(orig, new, preserve_mode=1, preserve_times=1)
        else:
            print("BCD Model exists in cache directory: {0}".format(new))

    CUSTOM_ENVIRONMENT = expConfig.get(input_arguments.processingEnvironment,
                                       'CUSTOM_ENVIRONMENT')
    CUSTOM_ENVIRONMENT = eval(CUSTOM_ENVIRONMENT)
    ## Set custom environment variables so that subprocesses work properly (e.g. for FreeSurfer)
    # print CUSTOM_ENVIRONMENT
    for key, value in CUSTOM_ENVIRONMENT.items():
        # print "SETTING: ", key, value
        os.putenv(key, value)
        os.environ[key] = value
    # print os.environ
    # sys.exit(-1)

    ## If freesurfer is requested, then ensure that a sane environment is available
    if 'FREESURFER' in WORKFLOW_COMPONENTS:
        print "FREESURFER NEEDS TO CHECK FOR SANE ENVIRONMENT HERE."

    CLUSTER_QUEUE = expConfig.get(input_arguments.processingEnvironment,
                                  'CLUSTER_QUEUE')
    CLUSTER_QUEUE_LONG = expConfig.get(input_arguments.processingEnvironment,
                                       'CLUSTER_QUEUE_LONG')

    ## Setup environment for CPU load balancing of ITK based programs.
    import multiprocessing
    total_CPUS = multiprocessing.cpu_count()
    if input_arguments.wfrun == 'helium_all.q':
        pass
    elif input_arguments.wfrun == 'helium_all.q_graph':
        pass
    elif input_arguments.wfrun == 'ipl_OSX':
        pass
    elif input_arguments.wfrun == 'local_4':
        os.environ['NSLOTS'] = "{0}".format(total_CPUS / 4)
    elif input_arguments.wfrun == 'local_12':
        os.environ['NSLOTS'] = "{0}".format(total_CPUS / 12)
    elif input_arguments.wfrun == 'local':
        os.environ['NSLOTS'] = "{0}".format(total_CPUS / 1)
    elif input_arguments.wfrun == 'ds_runner':
        os.environ['NSLOTS'] = "{0}".format(total_CPUS / 1)
    else:
        print "FAILED RUN: You must specify the run environment type. [helium_all.q,helium_all.q_graph,ipl_OSX,local_4,local_12,local,ds_runner]"
        print input_arguments.wfrun
        sys.exit(-1)

    print "Configuring Pipeline"
    import SessionDB
    subjectDatabaseFile = os.path.join(ExperimentBaseDirectoryCache,
                                       'InternalWorkflowSubjectDB.db')
    subject_list = input_arguments.subject.split(',')
    ## TODO:  Only make DB if db is older than subject_data_file.
    if (not os.path.exists(subjectDatabaseFile)) or (
            os.path.getmtime(subjectDatabaseFile) <
            os.path.getmtime(subject_data_file)):
        ExperimentDatabase = SessionDB.SessionDB(subjectDatabaseFile,
                                                 subject_list)
        ExperimentDatabase.MakeNewDB(subject_data_file, mountPrefix)
        ExperimentDatabase = None
        ExperimentDatabase = SessionDB.SessionDB(subjectDatabaseFile,
                                                 subject_list)
    else:
        print("Using cached database, {0}".format(subjectDatabaseFile))
        ExperimentDatabase = SessionDB.SessionDB(subjectDatabaseFile,
                                                 subject_list)
    print "ENTIRE DB for {_subjid}: ".format(
        _subjid=ExperimentDatabase.getSubjectFilter())
    print "^^^^^^^^^^^^^"
    for row in ExperimentDatabase.getEverything():
        print row
    print "^^^^^^^^^^^^^"

    ## Create the shell wrapper script for ensuring that all jobs running on remote hosts from SGE
    #  have the same environment as the job submission host.
    JOB_SCRIPT = get_global_sge_script(sys.path, PROGRAM_PATHS,
                                       CUSTOM_ENVIRONMENT)
    print JOB_SCRIPT

    import WorkupT1T2  # NOTE:  This needs to occur AFTER the PYTHON_AUX_PATHS has been modified
    print "TESTER"
    import ShortWorkupT1T2
    for subjectid in ExperimentDatabase.getAllSubjects():
        if input_arguments.doshort:
            baw200 = ShortWorkupT1T2.ShortWorkupT1T2(
                subjectid,
                mountPrefix,
                os.path.join(ExperimentBaseDirectoryCache, str(subjectid)),
                ExperimentBaseDirectoryResults,
                ExperimentDatabase,
                CACHE_ATLASPATH,
                CACHE_BCDMODELPATH,
                GLOBAL_DATA_SINK_REWRITE,
                WORKFLOW_COMPONENTS=WORKFLOW_COMPONENTS,
                CLUSTER_QUEUE=CLUSTER_QUEUE,
                CLUSTER_QUEUE_LONG=CLUSTER_QUEUE_LONG)
        else:
            baw200 = WorkupT1T2.WorkupT1T2(
                subjectid,
                mountPrefix,
                os.path.join(ExperimentBaseDirectoryCache, str(subjectid)),
                ExperimentBaseDirectoryResults,
                ExperimentDatabase,
                CACHE_ATLASPATH,
                CACHE_BCDMODELPATH,
                GLOBAL_DATA_SINK_REWRITE,
                WORKFLOW_COMPONENTS=WORKFLOW_COMPONENTS,
                CLUSTER_QUEUE=CLUSTER_QUEUE,
                CLUSTER_QUEUE_LONG=CLUSTER_QUEUE_LONG,
                SGE_JOB_SCRIPT=JOB_SCRIPT)
        print "Start Processing"

        SGEFlavor = 'SGE'
        try:
            if input_arguments.wfrun == 'helium_all.q':
                try:
                    baw200.write_graph()
                except:
                    pass
                baw200.run(
                    plugin=SGEFlavor,
                    plugin_args=dict(
                        template=JOB_SCRIPT,
                        qsub_args=
                        "-S /bin/bash -cwd -pe smp1 1-12 -l h_vmem=19G,mem_free=9G -o /dev/null -e /dev/null "
                        + CLUSTER_QUEUE))
            elif input_arguments.wfrun == 'helium_all.q_graph':
                try:
                    baw200.write_graph()
                except:
                    pass
                SGEFlavor = 'SGEGraph'  # Use the SGEGraph processing
                baw200.run(
                    plugin=SGEFlavor,
                    plugin_args=dict(
                        template=JOB_SCRIPT,
                        qsub_args=
                        "-S /bin/bash -cwd -pe smp1 1-12 -l h_vmem=19G,mem_free=9G -o /dev/null -e /dev/null "
                        + CLUSTER_QUEUE))
            elif input_arguments.wfrun == 'ipl_OSX':
                try:
                    baw200.write_graph()
                except:
                    pass
                print "Running On ipl_OSX"
                baw200.run(
                    plugin=SGEFlavor,
                    plugin_args=dict(
                        template=JOB_SCRIPT,
                        qsub_args=
                        "-S /bin/bash -cwd -pe smp1 1-12 -l h_vmem=19G,mem_free=9G -o /dev/null -e /dev/null "
                        + CLUSTER_QUEUE))
            elif input_arguments.wfrun == 'local_4':
                try:
                    baw200.write_graph()
                except:
                    pass
                print "Running with 4 parallel processes on local machine"
                baw200.run(plugin='MultiProc', plugin_args={'n_procs': 4})
            elif input_arguments.wfrun == 'local_12':
                try:
                    baw200.write_graph()
                except:
                    pass
                print "Running with 12 parallel processes on local machine"
                baw200.run(plugin='MultiProc', plugin_args={'n_procs': 12})
            elif input_arguments.wfrun == 'ds_runner':

                class ds_runner(object):
                    def run(self, graph, **kwargs):
                        for node in graph.nodes():
                            if '_ds' in node.name.lower():
                                node.run()

                baw200.run(plugin=ds_runner())
            elif input_arguments.wfrun == 'local':
                try:
                    baw200.write_graph()
                except:
                    pass
                print "Running sequentially on local machine"
                # baw200.run(updatehash=True)
                baw200.run()
            else:
                print "You must specify the run environment type. [helium_all.q,helium_all.q_graph,ipl_OSX,local_4,local_12,local]"
                print input_arguments.wfrun
                sys.exit(-1)
        except Exception, err:
            print("ERROR: EXCEPTION CAUGHT IN RUNNING SUBJECT {0}".format(
                subjectid))
            raise err
Example #8
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""Utility routines for workflow graphs
"""

from copy import deepcopy
from glob import glob
import logging
import os
import re

import numpy as np
from nipype.utils.misc import package_check
package_check('networkx', '1.3')
import networkx as nx

from nipype.interfaces.base import CommandLine, isdefined, Undefined
from nipype.utils.filemanip import fname_presuffix, FileNotFoundError,\
    filename_to_list
from nipype.utils.config import config
from nipype.utils.misc import create_function_from_source
from nipype.interfaces.utility import IdentityInterface

logger = logging.getLogger('workflow')

try:
    dfs_preorder = nx.dfs_preorder
except AttributeError:
    dfs_preorder = nx.dfs_preorder_nodes
    logger.debug('networkx 1.4 dev or higher detected')
Example #9
import os

import nibabel as nb
import numpy as np

from nipype.utils.misc import package_check

package_check("nipy")
from nipy.labs.mask import compute_mask

from nipype.interfaces.base import TraitedSpec, BaseInterface, traits, BaseInterfaceInputSpec
from nipype.interfaces.traits import File
from nipype.utils.misc import isdefined


class ComputeMaskInputSpec(BaseInterfaceInputSpec):
    mean_volume = File(exists=True, mandatory=True, desc="mean EPI image, used to compute the threshold for the mask")
    reference_volume = File(
        exists=True,
        desc="reference volume used to compute the mask. If none is give, the \
        mean volume is used.",
    )
    m = traits.Float(desc="lower fraction of the histogram to be discarded")
    M = traits.Float(desc="upper fraction of the histogram to be discarded")
    cc = traits.Bool(desc="if True, only the largest connected component is kept")


class ComputeMaskOutputSpec(TraitedSpec):
    brain_mask = File(exists=True)

Example #10
try:
    from itertools import imap as map  # Python 2: itertools.imap is the lazy equivalent of the builtin map
except ImportError:
    pass  # Python 3: the builtin map is already lazy

from collections import OrderedDict
from copy import deepcopy
from glob import glob
from collections import defaultdict
import os
import re
import numpy as np
from nipype.utils.misc import package_check
from functools import reduce

package_check('networkx', '1.3')

import networkx as nx

from ...external.six import string_types
from ...utils.filemanip import (fname_presuffix, FileNotFoundError,
                                filename_to_list, get_related_files)
from ...utils.misc import create_function_from_source, str2bool
from ...interfaces.base import (CommandLine, isdefined, Undefined,
                                InterfaceResult)
from ...interfaces.utility import IdentityInterface
from ...utils.provenance import ProvStore, pm, nipype_ns, get_id

from ... import logging, config
logger = logging.getLogger('workflow')
Example #11
"""

import warnings
import numpy as np
import tempfile
from nipype.utils.misc import package_check

from nipype.interfaces.base import (TraitedSpec, File, Undefined, traits,
                                    BaseInterface, isdefined,
                                    BaseInterfaceInputSpec)

from nipype.utils.filemanip import fname_presuffix

have_nitime = True
try:
    package_check('nitime')
except Exception, e:
    have_nitime = False
    warnings.warn('nitime not installed')
else:
    import nitime.analysis as nta
    from nitime.timeseries import TimeSeries
    import nitime.viz as viz


class CoherenceAnalyzerInputSpec(BaseInterfaceInputSpec):

    #Input either csv file, or time-series object and use _xor_inputs to
    #discriminate
    _xor_inputs = ('in_file', 'in_TS')
    in_file = File(desc=('csv file with ROIs on the columns and '
Example #12
import nipype.interfaces.freesurfer as fs  # freesurfer
import nipype.interfaces.mrtrix as mrtrix
import nipype.algorithms.misc as misc
import nipype.interfaces.cmtk as cmtk
import nipype.interfaces.dipy as dipy
import inspect
import os, os.path as op  # system functions
from nipype.workflows.dmri.fsl.dti import create_eddy_correct_pipeline
from nipype.workflows.dmri.camino.connectivity_mapping import select_aparc_annot
from nipype.utils.misc import package_check
import warnings
from nipype.workflows.dmri.connectivity.nx import create_networkx_pipeline, create_cmats_to_csv_pipeline
from nipype.workflows.smri.freesurfer import create_tessellation_flow

try:
    package_check("cmp")
except Exception, e:
    warnings.warn("cmp not installed")
else:
    import cmp

"""
This needs to point to the freesurfer subjects directory (Recon-all must have been run on subj1 from the FSL course data)
Alternatively, the reconstructed subject data can be downloaded from:

	* http://dl.dropbox.com/u/315714/subj1.zip

"""

subjects_dir = op.abspath(op.join(op.curdir, "./subjects"))
fs.FSCommand.set_default_subjects_dir(subjects_dir)
Example #13
# -*- coding: utf-8 -*-
from nipype.interfaces.base import (TraitedSpec, BaseInterface,
                                    BaseInterfaceInputSpec, File, isdefined,
                                    traits)
from nipype.utils.filemanip import split_filename
import os.path as op
import nibabel as nb, nibabel.trackvis as trk
from nipype.utils.misc import package_check
import warnings

from ... import logging
iflogger = logging.getLogger('interface')

have_dipy = True
try:
    package_check('dipy', version='0.6.0')
except Exception, e:
    have_dipy = False
else:
    from dipy.tracking.utils import density_map


class TrackDensityMapInputSpec(TraitedSpec):
    in_file = File(exists=True,
                   mandatory=True,
                   desc='The input TrackVis track file')
    voxel_dims = traits.List(traits.Float,
                             minlen=3,
                             maxlen=3,
                             desc='The size of each voxel in mm.')
    data_dims = traits.List(traits.Int,
Example #14
from nipype.interfaces.base import (
    BaseInterface,
    BaseInterfaceInputSpec,
    traits,
    File,
    TraitedSpec,
    InputMultiPath,
    isdefined,
)
from nipype.utils.filemanip import split_filename
from nipype.utils.misc import package_check

have_cfflib = True
try:
    package_check("cfflib")
except Exception as e:
    have_cfflib = False
else:
    import cfflib as cf


class CFFConverterInputSpec(BaseInterfaceInputSpec):
    graphml_networks = InputMultiPath(File(exists=True), desc="list of graphML networks")
    gpickled_networks = InputMultiPath(File(exists=True), desc="list of gpickled Networkx graphs")

    gifti_surfaces = InputMultiPath(File(exists=True), desc="list of GIFTI surfaces")
    gifti_labels = InputMultiPath(File(exists=True), desc="list of GIFTI labels")
    nifti_volumes = InputMultiPath(File(exists=True), desc="list of NIFTI volumes")
    tract_files = InputMultiPath(File(exists=True), desc="list of Trackvis fiber files")
Example #15
def MasterProcessingController(argv=None):
    import argparse
    import ConfigParser
    import csv
    import string

    if argv == None:
        argv = sys.argv

    # Create and parse input arguments
    parser = argparse.ArgumentParser(description='Runs a mini version of BRAINSAutoWorkup')
    group = parser.add_argument_group('Required')
    group.add_argument('-pe', action="store", dest='processingEnvironment', required=True,
                       help='The name of the processing environment to use from the config file')
    group.add_argument('-wfrun', action="store", dest='wfrun', required=True,
                       help='The name of the workflow running plugin to use')
    group.add_argument('-subject', action="store", dest='subject', required=True,
                       help='The name of the subject to process')
    group.add_argument('-ExperimentConfig', action="store", dest='ExperimentConfig', required=True,
                       help='The path to the file that describes the entire experiment')
    parser.add_argument('-rewrite_datasinks', action='store_true', default=False,
                        help='Use if the datasinks should be forced rerun.\nDefault: value in configuration file')
    parser.add_argument('--version', action='version', version='%(prog)s 1.0')
    input_arguments = parser.parse_args()

    expConfig = ConfigParser.ConfigParser()
    expConfig.read(input_arguments.ExperimentConfig)

    # Pipeline-specific information
    GLOBAL_DATA_SINK_REWRITE_FROM_CONFIG = expConfig.getboolean('PIPELINE', 'GLOBAL_DATA_SINK_REWRITE')
    GLOBAL_DATA_SINK_REWRITE = setDataSinkRewriteValue(input_arguments.rewrite_datasinks, GLOBAL_DATA_SINK_REWRITE_FROM_CONFIG)

    # Experiment specific information
    subject_data_file = expConfig.get('EXPERIMENT_DATA', 'SESSION_DB')
    ExperimentName = expConfig.get('EXPERIMENT_DATA', 'EXPERIMENTNAME')
    if expConfig.has_option('EXPERIMENT_DATA', 'PREVIOUSEXPERIMENTNAME'):
        PreviousExperimentName = expConfig.get('EXPERIMENT_DATA', 'PREVIOUSEXPERIMENTNAME')
    else:
        PreviousExperimentName = None

    # Platform specific information
    #     Prepend the python search paths
    PYTHON_AUX_PATHS = expConfig.get(input_arguments.processingEnvironment, 'PYTHON_AUX_PATHS')
    PYTHON_AUX_PATHS = PYTHON_AUX_PATHS.split(':')
    PYTHON_AUX_PATHS.extend(sys.path)
    sys.path = PYTHON_AUX_PATHS
    #####################################################################################
    #     Prepend the shell environment search paths
    PROGRAM_PATHS = expConfig.get(input_arguments.processingEnvironment, 'PROGRAM_PATHS')
    PROGRAM_PATHS = PROGRAM_PATHS.split(':')
    PROGRAM_PATHS.extend(os.environ['PATH'].split(':'))
    PROGRAM_PATHS = [os.path.dirname(__file__)] + PROGRAM_PATHS
    print "Adding directory {0} to PATH...".format(os.path.dirname(__file__))
    os.environ['PATH'] = ':'.join(PROGRAM_PATHS)
    ######################################################################################
    # Get virtualenv source file
    if expConfig.has_option(input_arguments.processingEnvironment, 'VIRTUALENV'):
        print "Loading virtualenv..."
        VIRTUALENV = expConfig.get(input_arguments.processingEnvironment, 'VIRTUALENV')
        activate_this = os.path.join(VIRTUALENV, 'bin', 'activate_this.py')
        execfile(activate_this, dict(__file__=activate_this))
    ###### Now ensure that all the required packages can be read in from this custom path
    #\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/
    # print sys.path
    from nipype import config  # NOTE:  This needs to occur AFTER the PYTHON_AUX_PATHS has been modified
    config.enable_debug_mode()  # NOTE:  This needs to occur AFTER the PYTHON_AUX_PATHS has been modified
    ##############################################################################
    from nipype.interfaces.base import CommandLine, CommandLineInputSpec, TraitedSpec, File, Directory
    from nipype.interfaces.base import traits, isdefined, BaseInterface
    from nipype.interfaces.utility import Merge, Split, Function, Rename, IdentityInterface
    import nipype.interfaces.io as nio   # Data i/o
    import nipype.pipeline.engine as pe  # pypeline engine
    from nipype.interfaces.freesurfer import ReconAll

    from nipype.utils.misc import package_check
    # package_check('nipype', '5.4', 'tutorial1') ## HACK: Check nipype version
    package_check('numpy', '1.3', 'tutorial1')
    package_check('scipy', '0.7', 'tutorial1')
    package_check('networkx', '1.0', 'tutorial1')
    package_check('IPython', '0.10', 'tutorial1')

    ## Check to ensure that SimpleITK can be found
    import SimpleITK as sitk
    #\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/
    #####################################################################################
    #  FreeSurfer is extraordinarily finicky and is easily confused and incorrect.
    #  Force that all the FREESURFER env vars are set in subsequent scripts by
    #  ensuring that rough versions of these environmental variables are not
    #  set internal to this script.
    prohibited_env_var_exists = False
    for ENVVAR_TO_CHECK in ['FREESURFER_HOME', 'FSFAST_HOME', 'FSF_OUTPUT_FORMAT', 'SUBJECTS_DIR', 'MNI_DIR', 'FSL_DIR']:
        if ENVVAR_TO_CHECK in os.environ:
            prohibited_env_var_exists = True
            print("ERROR: Environmental Variable {0}={1} exists.  Please unset before continuing.".format(ENVVAR_TO_CHECK, os.environ[ENVVAR_TO_CHECK]))
    if prohibited_env_var_exists:
        sys.exit(-1)

    #\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/
    #    Define platform specific output write paths
    mountPrefix = expConfig.get(input_arguments.processingEnvironment, 'MOUNTPREFIX')
    BASEOUTPUTDIR = expConfig.get(input_arguments.processingEnvironment, 'BASEOUTPUTDIR')
    ExperimentBaseDirectoryPrefix = os.path.realpath(os.path.join(BASEOUTPUTDIR, ExperimentName))
    ExperimentBaseDirectoryCache = ExperimentBaseDirectoryPrefix + "_CACHE"
    ExperimentBaseDirectoryResults = ExperimentBaseDirectoryPrefix + "_Results"
    if not os.path.exists(ExperimentBaseDirectoryCache):
        os.makedirs(ExperimentBaseDirectoryCache)
    if not os.path.exists(ExperimentBaseDirectoryResults):
        os.makedirs(ExperimentBaseDirectoryResults)
    if not PreviousExperimentName is None:
        PreviousBaseDirectoryPrefix = os.path.realpath(os.path.join(BASEOUTPUTDIR, PreviousExperimentName))
        PreviousBaseDirectoryResults = PreviousBaseDirectoryPrefix + "_Results"
        assert os.path.exists(PreviousBaseDirectoryResults), "The previous experiment directory does not exist: {0}".format(PreviousBaseDirectoryResults)
    else:
        PreviousBaseDirectoryResults = None
    #    Define workup common reference data sets
    #    The ATLAS needs to be copied to the ExperimentBaseDirectoryPrefix
    #    The ATLAS pathing must stay constant
    ATLASPATH = expConfig.get(input_arguments.processingEnvironment, 'ATLASPATH')
    if not os.path.exists(ATLASPATH):
        print("ERROR:  Invalid Path for Atlas: {0}".format(ATLASPATH))
        sys.exit(-1)
    CACHE_ATLASPATH = os.path.realpath(os.path.join(ExperimentBaseDirectoryCache, 'Atlas'))
    from distutils.dir_util import copy_tree
    if not os.path.exists(CACHE_ATLASPATH):
        print("Copying a reference of the atlas to the experiment cache directory:\n    from: {0}\n    to: {1}".format(ATLASPATH, CACHE_ATLASPATH))
        copy_tree(ATLASPATH, CACHE_ATLASPATH, preserve_mode=1, preserve_times=1)
        ## Now generate the xml file with the correct pathing
        file_replace(os.path.join(ATLASPATH, 'ExtendedAtlasDefinition.xml.in'), os.path.join(CACHE_ATLASPATH, 'ExtendedAtlasDefinition.xml'), "@ATLAS_DIRECTORY@", CACHE_ATLASPATH)
    else:
        print("Atlas already exists in experiment cache directory: {0}".format(CACHE_ATLASPATH))

    CUSTOM_ENVIRONMENT = expConfig.get(input_arguments.processingEnvironment, 'CUSTOM_ENVIRONMENT')
    CUSTOM_ENVIRONMENT = eval(CUSTOM_ENVIRONMENT)
    ## Set custom environment variables so that subprocesses work properly (e.g. for FreeSurfer)
    # print CUSTOM_ENVIRONMENT
    for key, value in CUSTOM_ENVIRONMENT.items():
        # print "SETTING: ", key, value
        os.putenv(key, value)
        os.environ[key] = value
    # print os.environ
    # sys.exit(-1)

    WORKFLOW_COMPONENTS_STRING = expConfig.get('EXPERIMENT_DATA', 'WORKFLOW_COMPONENTS')
    WORKFLOW_COMPONENTS = eval(WORKFLOW_COMPONENTS_STRING)

    ## If freesurfer is requested, then ensure that a sane environment is available
    if 'FREESURFER' in WORKFLOW_COMPONENTS:
        print "FREESURFER NEEDS TO CHECK FOR SANE ENVIRONMENT HERE."

    ## Setup environment for CPU load balancing of ITK based programs.
    total_CPUS = multiprocessing.cpu_count()
    if input_arguments.wfrun == 'helium_all.q' or \
      input_arguments.wfrun == 'helium_all.q_graph' or \
      input_arguments.wfrun == 'ipl_OSX':
        assert expConfig.getboolean(input_arguments.processingEnvironment, 'CLUSTER'), "CLUSTER section not set to true!"
        CLUSTER_QUEUE, CLUSTER_QUEUE_LONG, QSTAT_IMMEDIATE_EXE, QSTAT_CACHED_EXE, MODULES = get_cluster_settings(expConfig)
    elif input_arguments.wfrun == 'local_4':
        os.environ['NSLOTS'] = "{0}".format(total_CPUS / 4)
    elif input_arguments.wfrun == 'local_12':
        os.environ['NSLOTS'] = "{0}".format(total_CPUS / 12)
    elif input_arguments.wfrun == 'local':
        # HACK
        CLUSTER_QUEUE, CLUSTER_QUEUE_LONG, QSTAT_IMMEDIATE_EXE, QSTAT_CACHED_EXE, MODULES = get_cluster_settings(expConfig)
        # END HACK
        os.environ['NSLOTS'] = "{0}".format(total_CPUS / 1)
    elif input_arguments.wfrun == 'ds_runner':
        os.environ['NSLOTS'] = "{0}".format(total_CPUS / 1)
    else:
        print "FAILED RUN: You must specify the run environment type. [helium_all.q,helium_all.q_graph,ipl_OSX,local_4,local_12,local,ds_runner]"
        print input_arguments.wfrun
        sys.exit(-1)

    print "Configuring Pipeline"
    ## Ensure that entire db is built and cached before parallel section starts.
    _ignoreme = OpenSubjectDatabase(ExperimentBaseDirectoryCache, [ "all" ], mountPrefix, subject_data_file)
    to_do_subjects = input_arguments.subject.split(',')
    if to_do_subjects[0] == "all":
        to_do_subjects=_ignoreme.getAllSubjects()
    _ignoreme = None

    ## Create the shell wrapper script for ensuring that all jobs running on remote hosts from SGE
    #  have the same environment as the job submission host.

    JOB_SCRIPT = get_global_sge_script(sys.path, os.environ['PATH'], CUSTOM_ENVIRONMENT, MODULES)
    print JOB_SCRIPT

    # Randomly shuffle to_do_subjects to get max
    import random
    random.shuffle(to_do_subjects)

    ## Make a list of all the arguments to be processed
    sp_args_list = list()
    start_time=time.time()
    subj_index = 1
    for subjectid in to_do_subjects:
        delay = 2.5*subj_index
        subj_index += 1
        print("START DELAY: {0}".format(delay))
        sp_args=(CACHE_ATLASPATH, CLUSTER_QUEUE, CLUSTER_QUEUE_LONG,QSTAT_IMMEDIATE_EXE,QSTAT_CACHED_EXE,
                                  ExperimentBaseDirectoryCache, ExperimentBaseDirectoryResults, subject_data_file,
                                  GLOBAL_DATA_SINK_REWRITE, JOB_SCRIPT, WORKFLOW_COMPONENTS, input_arguments,
                                  mountPrefix, start_time+delay, subjectid, PreviousBaseDirectoryResults)
        sp_args_list.append(sp_args)
    if 'local' in input_arguments.wfrun:
        print("RUNNING WITHOUT POOL BUILDING")
        for sp_args in sp_args_list:
            DoSingleSubjectProcessing(sp_args)
    else:
        ## Make a pool of workers to submit simultaneously
        from multiprocessing import Pool
        myPool = Pool(processes=64,maxtasksperchild=1)
        all_results=myPool.map_async(DoSingleSubjectProcessing,sp_args_list).get(1e100)

        for indx in range(0,len(sp_args_list)):
            if all_results[indx] == False:
                    print "FAILED for {0}".format(sp_args_list[indx][-1])

    print("THIS RUN OF BAW FOR SUBJS {0} HAS COMPLETED".format(to_do_subjects))
    return 0
Example #16
def main(argv=None):
    import argparse
    import ConfigParser
    import csv
    import string

    if argv == None:
        argv = sys.argv

    # Create and parse input arguments
    parser = argparse.ArgumentParser(description='Runs a mini version of BRAINSAutoWorkup')
    group = parser.add_argument_group('Required')
    group.add_argument('-pe', action="store", dest='processingEnvironment', required=True,
                       help='The name of the processing environment to use from the config file')
    group.add_argument('-wfrun', action="store", dest='wfrun', required=True,
                       help='The name of the workflow running plugin to use')
    group.add_argument('-subject', action="store", dest='subject', required=True,
                       help='The name of the subject to process')
    group.add_argument('-ExperimentConfig', action="store", dest='ExperimentConfig', required=True,
                       help='The path to the file that describes the entire experiment')
    parser.add_argument('-doshort', action='store', dest='doshort', default=False, help='If not present, do long')
    parser.add_argument('-rewrite_datasinks', action='store_true', default=False,
                        help='Use if the datasinks should be forced rerun.\nDefault: value in configuration file')
    parser.add_argument('--version', action='version', version='%(prog)s 1.0')
    input_arguments = parser.parse_args()

    expConfig = ConfigParser.ConfigParser()
    expConfig.read(input_arguments.ExperimentConfig)

    # Pipeline-specific information
    GLOBAL_DATA_SINK_REWRITE_FROM_CONFIG = expConfig.getboolean('PIPELINE', 'GLOBAL_DATA_SINK_REWRITE')
    GLOBAL_DATA_SINK_REWRITE=setDataSinkRewriteValue(input_arguments.rewrite_datasinks, GLOBAL_DATA_SINK_REWRITE_FROM_CONFIG)

    # Experiment specific information
    subject_data_file = expConfig.get('EXPERIMENT_DATA', 'SESSION_DB')
    ExperimentName = expConfig.get('EXPERIMENT_DATA', 'EXPERIMENTNAME')
    WORKFLOW_COMPONENTS_STRING = expConfig.get('EXPERIMENT_DATA', 'WORKFLOW_COMPONENTS')
    WORKFLOW_COMPONENTS = eval(WORKFLOW_COMPONENTS_STRING)

    # Platform specific information
    #     Prepend the python search paths
    PYTHON_AUX_PATHS = expConfig.get(input_arguments.processingEnvironment, 'PYTHON_AUX_PATHS')
    PYTHON_AUX_PATHS = PYTHON_AUX_PATHS.split(':')
    PYTHON_AUX_PATHS.extend(sys.path)
    sys.path = PYTHON_AUX_PATHS
    ######################################################################################
    ###### Now ensure that all the required packages can be read in from this custom path
    #\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/
    # print sys.path
    from nipype import config  # NOTE:  This needs to occur AFTER the PYTHON_AUX_PATHS has been modified
    config.enable_debug_mode()  # NOTE:  This needs to occur AFTER the PYTHON_AUX_PATHS has been modified
    ##############################################################################
    from nipype.interfaces.base import CommandLine, CommandLineInputSpec, TraitedSpec, File, Directory
    from nipype.interfaces.base import traits, isdefined, BaseInterface
    from nipype.interfaces.utility import Merge, Split, Function, Rename, IdentityInterface
    import nipype.interfaces.io as nio   # Data i/o
    import nipype.pipeline.engine as pe  # pypeline engine
    from nipype.interfaces.freesurfer import ReconAll

    from nipype.utils.misc import package_check
    # package_check('nipype', '5.4', 'tutorial1') ## HACK: Check nipype version
    package_check('numpy', '1.3', 'tutorial1')
    package_check('scipy', '0.7', 'tutorial1')
    package_check('networkx', '1.0', 'tutorial1')
    package_check('IPython', '0.10', 'tutorial1')

    ## Check to ensure that SimpleITK can be found
    import SimpleITK as sitk
    #\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/
    #####################################################################################
    #  FreeSurfer is extraordinarily finicky and is easily confused and incorrect.
    #  Force that all the FREESURFER env vars are set in subsequent scripts by
    #  ensuring that rough versions of these environmental variables are not
    #  set internal to this script.
    prohibited_env_var_exists = False
    for ENVVAR_TO_CHECK in ['FREESURFER_HOME', 'FSFAST_HOME', 'FSF_OUTPUT_FORMAT', 'SUBJECTS_DIR', 'MNI_DIR', 'FSL_DIR']:
        if ENVVAR_TO_CHECK in os.environ:
            prohibited_env_var_exists = True
            print("ERROR: Environmental Variable {0}={1} exists.  Please unset before continuing.".format(ENVVAR_TO_CHECK, os.environ[ENVVAR_TO_CHECK]))
    if prohibited_env_var_exists:
        sys.exit(-1)

    #\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/
    #####################################################################################
    #     Prepend the shell environment search paths
    PROGRAM_PATHS = expConfig.get(input_arguments.processingEnvironment, 'PROGRAM_PATHS')
    PROGRAM_PATHS = PROGRAM_PATHS.split(':')
    PROGRAM_PATHS.extend(os.environ['PATH'].split(':'))
    os.environ['PATH'] = ':'.join(PROGRAM_PATHS)
    #    Define platform specific output write paths
    mountPrefix = expConfig.get(input_arguments.processingEnvironment, 'MOUNTPREFIX')
    BASEOUTPUTDIR = expConfig.get(input_arguments.processingEnvironment, 'BASEOUTPUTDIR')
    ExperimentBaseDirectoryPrefix = os.path.realpath(os.path.join(BASEOUTPUTDIR, ExperimentName))
    ExperimentBaseDirectoryCache = ExperimentBaseDirectoryPrefix + "_CACHE"
    ExperimentBaseDirectoryResults = ExperimentBaseDirectoryPrefix + "_Results"
    if not os.path.exists(ExperimentBaseDirectoryCache):
        os.makedirs(ExperimentBaseDirectoryCache)
    if not os.path.exists(ExperimentBaseDirectoryResults):
        os.makedirs(ExperimentBaseDirectoryResults)
    #    Define workup common reference data sets
    #    The ATLAS needs to be copied to the ExperimentBaseDirectoryPrefix
    #    The ATLAS pathing must stay constant
    ATLASPATH = expConfig.get(input_arguments.processingEnvironment, 'ATLASPATH')
    if not os.path.exists(ATLASPATH):
        print("ERROR:  Invalid Path for Atlas: {0}".format(ATLASPATH))
        sys.exit(-1)
    CACHE_ATLASPATH = os.path.realpath(os.path.join(ExperimentBaseDirectoryCache, 'Atlas'))
    from distutils.dir_util import copy_tree
    if not os.path.exists(CACHE_ATLASPATH):
        print("Copying a reference of the atlas to the experiment cache directory:\n    from: {0}\n    to: {1}".format(ATLASPATH, CACHE_ATLASPATH))
        copy_tree(ATLASPATH, CACHE_ATLASPATH, preserve_mode=1, preserve_times=1)
        ## Now generate the xml file with the correct pathing
        file_replace(os.path.join(ATLASPATH, 'ExtendedAtlasDefinition.xml.in'), os.path.join(CACHE_ATLASPATH, 'ExtendedAtlasDefinition.xml'), "@ATLAS_DIRECTORY@", CACHE_ATLASPATH)
    else:
        print("Atlas already exists in experiment cache directory: {0}".format(CACHE_ATLASPATH))
    #  Just to be safe, copy the model file as well
    BCDMODELPATH = expConfig.get(input_arguments.processingEnvironment, 'BCDMODELPATH')
    CACHE_BCDMODELPATH = os.path.join(ExperimentBaseDirectoryCache, os.path.basename(BCDMODELPATH))
    from distutils.file_util import copy_file
    for BCDModelFile in ['LLSModel-2ndVersion.h5', 'T1-2ndVersion.mdl']:
        if BCDModelFile[-2:] == 'h5':
            BCDModelFile = os.path.join('Transforms_h5', BCDModelFile)
        orig = os.path.join(BCDMODELPATH, BCDModelFile)
        new = os.path.join(CACHE_BCDMODELPATH, BCDModelFile)
        new = new.replace('Transforms_h5/', '')  # Flatten back out, even if you needed to get files from subdirectory.
        if not os.path.exists(CACHE_BCDMODELPATH):
            os.mkdir(CACHE_BCDMODELPATH)
        if not os.path.exists(new):
            print("Copying BCD Model file to cache directory: {0}".format(new))
            copy_file(orig, new, preserve_mode=1, preserve_times=1)
        else:
            print("BCD Model exists in cache directory: {0}".format(new))

    CUSTOM_ENVIRONMENT = expConfig.get(input_arguments.processingEnvironment, 'CUSTOM_ENVIRONMENT')
    CUSTOM_ENVIRONMENT = eval(CUSTOM_ENVIRONMENT)
    ## Set custom environment variables so that subprocesses work properly (e.g. for FreeSurfer)
    # print CUSTOM_ENVIRONMENT
    for key, value in CUSTOM_ENVIRONMENT.items():
        # print "SETTING: ", key, value
        os.putenv(key, value)
        os.environ[key] = value
    # print os.environ
    # sys.exit(-1)

    ## If freesurfer is requested, then ensure that a sane environment is available
    if 'FREESURFER' in WORKFLOW_COMPONENTS:
        print "FREESURFER NEEDS TO CHECK FOR SANE ENVIRONMENT HERE."

    CLUSTER_QUEUE = expConfig.get(input_arguments.processingEnvironment, 'CLUSTER_QUEUE')
    CLUSTER_QUEUE_LONG = expConfig.get(input_arguments.processingEnvironment, 'CLUSTER_QUEUE_LONG')

    ## Setup environment for CPU load balancing of ITK based programs.
    import multiprocessing
    total_CPUS = multiprocessing.cpu_count()
    if input_arguments.wfrun == 'helium_all.q':
        pass
    elif input_arguments.wfrun == 'helium_all.q_graph':
        pass
    elif input_arguments.wfrun == 'ipl_OSX':
        pass
    elif input_arguments.wfrun == 'local_4':
        os.environ['NSLOTS'] = "{0}".format(total_CPUS / 4)
    elif input_arguments.wfrun == 'local_12':
        os.environ['NSLOTS'] = "{0}".format(total_CPUS / 12)
    elif input_arguments.wfrun == 'local':
        os.environ['NSLOTS'] = "{0}".format(total_CPUS / 1)
    elif input_arguments.wfrun == 'ds_runner':
        os.environ['NSLOTS'] = "{0}".format(total_CPUS / 1)
    else:
        print "FAILED RUN: You must specify the run environment type. [helium_all.q,helium_all.q_graph,ipl_OSX,local_4,local_12,local,ds_runner]"
        print input_arguments.wfrun
        sys.exit(-1)

    print "Configuring Pipeline"
    import SessionDB
    subjectDatabaseFile = os.path.join(ExperimentBaseDirectoryCache, 'InternalWorkflowSubjectDB.db')
    subject_list = input_arguments.subject.split(',')
    ## TODO:  Only make DB if db is older than subject_data_file.
    if (not os.path.exists(subjectDatabaseFile)) or (os.path.getmtime(subjectDatabaseFile) < os.path.getmtime(subject_data_file)):
        ExperimentDatabase = SessionDB.SessionDB(subjectDatabaseFile, subject_list)
        ExperimentDatabase.MakeNewDB(subject_data_file, mountPrefix)
        ExperimentDatabase = None
        ExperimentDatabase = SessionDB.SessionDB(subjectDatabaseFile, subject_list)
    else:
        print("Using cached database, {0}".format(subjectDatabaseFile))
        ExperimentDatabase = SessionDB.SessionDB(subjectDatabaseFile, subject_list)
    print "ENTIRE DB for {_subjid}: ".format(_subjid=ExperimentDatabase.getSubjectFilter())
    print "^^^^^^^^^^^^^"
    for row in ExperimentDatabase.getEverything():
        print row
    print "^^^^^^^^^^^^^"

    ## Create the shell wrapper script for ensuring that all jobs running on remote hosts from SGE
    #  have the same environment as the job submission host.
    JOB_SCRIPT = get_global_sge_script(sys.path, PROGRAM_PATHS, CUSTOM_ENVIRONMENT)
    print JOB_SCRIPT

    import WorkupT1T2  # NOTE:  This needs to occur AFTER the PYTHON_AUX_PATHS has been modified
    print "TESTER"
    import ShortWorkupT1T2
    for subjectid in ExperimentDatabase.getAllSubjects():
        if input_arguments.doshort:
            baw200 = ShortWorkupT1T2.ShortWorkupT1T2(subjectid, mountPrefix,
                                                     os.path.join(ExperimentBaseDirectoryCache, str(subjectid)),
                                                     ExperimentBaseDirectoryResults,
                                                     ExperimentDatabase,
                                                     CACHE_ATLASPATH,
                                                     CACHE_BCDMODELPATH,
                                                     GLOBAL_DATA_SINK_REWRITE,
                                                     WORKFLOW_COMPONENTS=WORKFLOW_COMPONENTS, CLUSTER_QUEUE=CLUSTER_QUEUE, CLUSTER_QUEUE_LONG=CLUSTER_QUEUE_LONG)
        else:
            baw200 = WorkupT1T2.WorkupT1T2(subjectid, mountPrefix,
                                           os.path.join(ExperimentBaseDirectoryCache, str(subjectid)),
                                           ExperimentBaseDirectoryResults,
                                           ExperimentDatabase,
                                           CACHE_ATLASPATH,
                                           CACHE_BCDMODELPATH,
                                           GLOBAL_DATA_SINK_REWRITE,
                                           WORKFLOW_COMPONENTS=WORKFLOW_COMPONENTS, CLUSTER_QUEUE=CLUSTER_QUEUE, CLUSTER_QUEUE_LONG=CLUSTER_QUEUE_LONG, SGE_JOB_SCRIPT=JOB_SCRIPT)
        print "Start Processing"

        SGEFlavor = 'SGE'
        try:
            if input_arguments.wfrun == 'helium_all.q':
                try:
                    baw200.write_graph()
                except:
                    pass
                baw200.run(plugin=SGEFlavor,
                           plugin_args=dict(template=JOB_SCRIPT, qsub_args="-S /bin/bash -cwd -pe smp1 1-12 -l h_vmem=19G,mem_free=9G -o /dev/null -e /dev/null " + CLUSTER_QUEUE))
            elif input_arguments.wfrun == 'helium_all.q_graph':
                try:
                    baw200.write_graph()
                except:
                    pass
                SGEFlavor = 'SGEGraph'  # Use the SGEGraph processing
                baw200.run(plugin=SGEFlavor,
                           plugin_args=dict(template=JOB_SCRIPT, qsub_args="-S /bin/bash -cwd -pe smp1 1-12 -l h_vmem=19G,mem_free=9G -o /dev/null -e /dev/null " + CLUSTER_QUEUE))
            elif input_arguments.wfrun == 'ipl_OSX':
                try:
                    baw200.write_graph()
                except:
                    pass
                print "Running On ipl_OSX"
                baw200.run(plugin=SGEFlavor,
                           plugin_args=dict(template=JOB_SCRIPT, qsub_args="-S /bin/bash -cwd -pe smp1 1-12 -l h_vmem=19G,mem_free=9G -o /dev/null -e /dev/null " + CLUSTER_QUEUE))
            elif input_arguments.wfrun == 'local_4':
                try:
                    baw200.write_graph()
                except:
                    pass
                print "Running with 4 parallel processes on local machine"
                baw200.run(plugin='MultiProc', plugin_args={'n_procs': 4})
            elif input_arguments.wfrun == 'local_12':
                try:
                    baw200.write_graph()
                except:
                    pass
                print "Running with 12 parallel processes on local machine"
                baw200.run(plugin='MultiProc', plugin_args={'n_procs': 12})
            elif input_arguments.wfrun == 'ds_runner':
                class ds_runner(object):
                    def run(self, graph, **kwargs):
                        for node in graph.nodes():
                            if '_ds' in node.name.lower():
                                node.run()
                baw200.run(plugin=ds_runner())
            elif input_arguments.wfrun == 'local':
                try:
                    baw200.write_graph()
                except:
                    pass
                print "Running sequentially on local machine"
                # baw200.run(updatehash=True)
                baw200.run()
            else:
                print "You must specify the run environment type. [helium_all.q,helium_all.q_graph,ipl_OSX,local_4,local_12,local]"
                print input_arguments.wfrun
                sys.exit(-1)
        except Exception, err:
            print("ERROR: EXCEPTION CAUGHT IN RUNNING SUBJECT {0}".format(subjectid))
            raise err
Example #17
from nipype.interfaces.base import (
    CommandLineInputSpec,
    TraitedSpec,
    File,
    Directory,
)
from nipype.interfaces.base import traits, isdefined, BaseInterface
from nipype.interfaces.utility import Merge, Split, Function, Rename, IdentityInterface
import nipype.interfaces.io as nio  # Data i/o
import nipype.pipeline.engine as pe  # pypeline engine
import nipype.interfaces.io as nio  # Data i/o
from nipype.interfaces.freesurfer import ReconAll

from nipype.utils.misc import package_check

# package_check('nipype', '5.4', 'tutorial1') ## HACK: Check nipype version
package_check("numpy", "1.3", "tutorial1")
package_check("scipy", "0.7", "tutorial1")
package_check("networkx", "1.0", "tutorial1")
package_check("IPython", "0.10", "tutorial1")

import os
from collections import OrderedDict  # Need OrderedDict internally to ensure consistent ordering

## Check to ensure that SimpleITK can be found
# import SimpleITK as sitk

SLICER_REFERENCE_DIR = "/scratch/DWI_DATA"
SLICER_RESULTS_DIR = "/scratch/DWI_DATA/SlicerResults"
ExperimentBaseDirectoryCache = SLICER_RESULTS_DIR
Example #19
0
"""

import warnings
import numpy as np
import tempfile
from nipype.utils.misc import package_check

from nipype.interfaces.base import (TraitedSpec, File, Undefined, traits,
                                    BaseInterface, isdefined,
                                    BaseInterfaceInputSpec)

from nipype.utils.filemanip import fname_presuffix

have_nitime = True
try:
    package_check('nitime')
except Exception, e:
    have_nitime = False
    warnings.warn('nitime not installed')
else:
    import nitime.analysis as nta
    from nitime.timeseries import TimeSeries
    import nitime.viz as viz


class CoherenceAnalyzerInputSpec(BaseInterfaceInputSpec):

    # Input either a csv file or a time-series object; use _xor_inputs to
    # discriminate between the two
    _xor_inputs = ('in_file', 'in_TS')
    in_file = File(desc=('csv file with ROIs on the columns and '
Example #20
0
def MasterProcessingController(argv=None):
    import argparse
    import configparser
    import csv
    import string

    if argv is None:
        argv = sys.argv

    # Create and parse input arguments
    parser = argparse.ArgumentParser(description='Runs a mini version of BRAINSAutoWorkup')
    group = parser.add_argument_group('Required')
    group.add_argument('-pe', action="store", dest='processingEnvironment', required=True,
                       help='The name of the processing environment to use from the config file')
    group.add_argument('-wfrun', action="store", dest='wfrun', required=True,
                       help='The name of the workflow running plugin to use')
    group.add_argument('-subject', action="store", dest='subject', required=True,
                       help='The name of the subject to process')
    group.add_argument('-ExperimentConfig', action="store", dest='ExperimentConfig', required=True,
                       help='The path to the file that describes the entire experiment')
    parser.add_argument('-rewrite_datasinks', action='store_true', default=False,
                        help='Use if the datasinks should be forced rerun.\nDefault: value in configuration file')
    parser.add_argument('--version', action='version', version='%(prog)s 1.0')
    args = parser.parse_args()

    config = configparser.ConfigParser(allow_no_value=True)
    config.read(args.ExperimentConfig)

    # Pipeline-specific information
    GLOBAL_DATA_SINK_REWRITE = setDataSinkRewriteValue(args.rewrite_datasinks,
                                                       config.getboolean('NIPYPE', 'GLOBAL_DATA_SINK_REWRITE'))
    experiment = get_experiment_settings(config)
    # Platform specific information
    environment = get_environment_settings(config)
    if environment['cluster']:
        cluster = get_cluster_settings(config)
    sys.path = environment['PYTHONPATH']
    os.environ['PATH'] = ':'.join(environment['PATH'])
    # Virtualenv
    if environment['virtualenv_dir'] is not None:
        print("Loading virtualenv_dir...")
        # execfile() does not exist in Python 3; execute the activation script explicitly
        with open(environment['virtualenv_dir']) as activate_this:
            exec(compile(activate_this.read(), environment['virtualenv_dir'], 'exec'),
                 dict(__file__=environment['virtualenv_dir']))
    ###### Now ensure that all the required packages can be read in from this custom path
    # \/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/
    # print sys.path
    ## Check to ensure that SimpleITK can be found
    import SimpleITK as sitk
    from nipype import config  # NOTE:  This needs to occur AFTER the PYTHON_AUX_PATHS has been modified
    config.enable_debug_mode()  # NOTE:  This needs to occur AFTER the PYTHON_AUX_PATHS has been modified
    # config.enable_provenance()

    ##############################################################################
    from nipype.interfaces.base import CommandLine, CommandLineInputSpec, TraitedSpec, File, Directory
    from nipype.interfaces.base import traits, isdefined, BaseInterface
    from nipype.interfaces.utility import Merge, Split, Function, Rename, IdentityInterface
    import nipype.interfaces.io as nio  # Data i/o
    import nipype.pipeline.engine as pe  # pypeline engine
    from nipype.interfaces.freesurfer import ReconAll

    from nipype.utils.misc import package_check
    # package_check('nipype', '5.4', 'tutorial1') ## HACK: Check nipype version
    package_check('numpy', '1.3', 'tutorial1')
    package_check('scipy', '0.7', 'tutorial1')
    package_check('networkx', '1.0', 'tutorial1')
    package_check('IPython', '0.10', 'tutorial1')

    # Raises EnvironmentError if the FreeSurfer environment is not empty
    verify_empty_freesurfer_env()

    # Define platform specific output write paths
    if not os.path.exists(experiment['output_cache']):
        os.makedirs(experiment['output_cache'])
    if not os.path.exists(experiment['output_results']):
        os.makedirs(experiment['output_results'])
    if 'input_results' in list(experiment.keys()):
        assert os.path.exists(
            experiment['input_results']), "The previous experiment directory does not exist: {0}".format(
            experiment['input_results'])

    # \/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/
    #    Define platform specific output write paths
    mountPrefix = expConfig.get(input_arguments.processingEnvironment, 'MOUNTPREFIX')
    BASEOUTPUTDIR = expConfig.get(input_arguments.processingEnvironment, 'BASEOUTPUTDIR')
    ExperimentBaseDirectoryPrefix = os.path.realpath(os.path.join(BASEOUTPUTDIR, ExperimentName))
    ExperimentBaseDirectoryCache = ExperimentBaseDirectoryPrefix + "_CACHE"
    ExperimentBaseDirectoryResults = ExperimentBaseDirectoryPrefix + "_Results"
    if not os.path.exists(ExperimentBaseDirectoryCache):
        os.makedirs(ExperimentBaseDirectoryCache)
    if not os.path.exists(ExperimentBaseDirectoryResults):
        os.makedirs(ExperimentBaseDirectoryResults)
    if PreviousExperimentName is not None:
        PreviousBaseDirectoryPrefix = os.path.realpath(os.path.join(BASEOUTPUTDIR, PreviousExperimentName))
        PreviousBaseDirectoryResults = PreviousBaseDirectoryPrefix + "_Results"
        assert os.path.exists(
            PreviousBaseDirectoryResults), "The previous experiment directory does not exist: {0}".format(
            PreviousBaseDirectoryResults)
    else:
        PreviousBaseDirectoryResults = None
    # Define workup common reference data sets
    #    The ATLAS needs to be copied to the ExperimentBaseDirectoryPrefix
    #    The ATLAS pathing must stay constant
    ATLASPATH = expConfig.get(input_arguments.processingEnvironment, 'ATLASPATH')
    if not os.path.exists(ATLASPATH):
        print("ERROR:  Invalid Path for Atlas: {0}".format(ATLASPATH))
        sys.exit(-1)
    CACHE_ATLASPATH = os.path.realpath(os.path.join(ExperimentBaseDirectoryCache, 'Atlas'))
    from distutils.dir_util import copy_tree
    if not os.path.exists(CACHE_ATLASPATH):
        print("Copying a reference of the atlas to the experiment cache directory:\n    from: {0}\n    to: {1}".format(
            ATLASPATH, CACHE_ATLASPATH))
        copy_tree(ATLASPATH, CACHE_ATLASPATH, preserve_mode=1, preserve_times=1)
        ## Now generate the xml file with the correct pathing
        file_replace(os.path.join(ATLASPATH, 'ExtendedAtlasDefinition.xml.in'),
                     os.path.join(CACHE_ATLASPATH, 'ExtendedAtlasDefinition.xml'), "@ATLAS_INSTALL_DIRECTORY@",
                     CACHE_ATLASPATH)
    else:
        print("Atlas already exists in experiment cache directory: {0}".format(CACHE_ATLASPATH))

    ## Set custom environment variables so that subprocesses work properly (e.g. for FreeSurfer)
    CUSTOM_ENVIRONMENT = eval(environment['misc'])
    # print CUSTOM_ENVIRONMENT
    for key, value in list(CUSTOM_ENVIRONMENT.items()):
        # print "SETTING: ", key, value
        os.putenv(key, value)
        os.environ[key] = value
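        # Assigning to os.environ already propagates the variable to child
        # processes (CPython calls putenv() under the hood), so the explicit
        # os.putenv() above is redundant but harmless.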
    # print os.environ
    # sys.exit(-1)

    WORKFLOW_COMPONENTS = experiment['components']
    if 'FREESURFER' in WORKFLOW_COMPONENTS:
        check_freesurfer_environment()

    cluster = setup_cpu(args.wfrun, config)  # None unless wfrun is 'helium*' or 'ipl_OSX', then dict()

    print("Configuring Pipeline")
    ## Ensure that entire db is built and cached before parallel section starts.
    _ignoreme = OpenSubjectDatabase(experiment['output_cache'], ["all"], environment['prefix'],
                                    environment['subject_data_file'])
    to_do_subjects = args.subject.split(',')
    if to_do_subjects[0] == "all":
        to_do_subjects = _ignoreme.getAllSubjects()
    _ignoreme = None

    ## Create the shell wrapper script for ensuring that all jobs running on remote hosts from SGE
    #  have the same environment as the job submission host.

    JOB_SCRIPT = get_global_sge_script(sys.path, os.environ['PATH'], CUSTOM_ENVIRONMENT, MODULES)
    print(JOB_SCRIPT)

    # Randomly shuffle to_do_subjects to get max
    import random
    random.shuffle(to_do_subjects)

    ## Make a list of all the arguments to be processed
    sp_args_list = list()
    start_time = time.time()
    subj_index = 1
    for subjectid in to_do_subjects:
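        # Give each subject a progressively later start time (2.5 s apart); the
        # delayed start_time is handed to DoSingleSubjectProcessing via sp_args
        # so parallel launches do not all begin at exactly the same moment.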
        delay = 2.5 * subj_index
        subj_index += 1
        print("START DELAY: {0}".format(delay))
        sp_args = (CACHE_ATLASPATH, CLUSTER_QUEUE, CLUSTER_QUEUE_LONG, QSTAT_IMMEDIATE_EXE, QSTAT_CACHED_EXE,
                   experiment['output_cache'], experiment['output_results'], environment['subject_data_file'],
                   GLOBAL_DATA_SINK_REWRITE, JOB_SCRIPT, WORKFLOW_COMPONENTS, args,
                   mountPrefix, start_time + delay, subjectid, PreviousBaseDirectoryResults)
        sp_args_list.append(sp_args)
    if 'local' in args.wfrun:
        print("RUNNING WITHOUT POOL BUILDING")
        for sp_args in sp_args_list:
            DoSingleSubjectProcessing(sp_args)
    else:
        ## Make a pool of workers to submit simultaneously
        from multiprocessing import Pool
        myPool = Pool(processes=64, maxtasksperchild=1)
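        # maxtasksperchild=1 gives every subject a fresh worker process, so
        # memory/state from one subject's workflow cannot leak into the next;
        # the .get(1e100) below is an effectively infinite timeout, a common
        # workaround to keep the blocking wait responsive to KeyboardInterrupt.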
        all_results = myPool.map_async(DoSingleSubjectProcessing, sp_args_list).get(1e100)

        for indx in range(0, len(sp_args_list)):
            if all_results[indx] == False:
                print("FAILED for {0}".format(sp_args_list[indx][-1]))

    print("THIS RUN OF BAW FOR SUBJS {0} HAS COMPLETED".format(to_do_subjects))
    return 0
Example #21
0
# vi: set ft=python sts=4 ts=4 sw=4 et:
from nipype.interfaces.base import (BaseInterface, BaseInterfaceInputSpec, traits,
                                    File, TraitedSpec, InputMultiPath,
                                    OutputMultiPath, isdefined)
import os.path as op
import numpy as np
import networkx as nx
from nipype.utils.misc import package_check
import warnings

from ... import logging
iflogger = logging.getLogger('interface')

have_cv = True
try:
    package_check('cviewer')
except Exception, e:
    have_cv = False
else:
    import cviewer.libs.pyconto.groupstatistics.nbs as nbs
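# Code that needs nbs can test have_cv first and fail with a clear message at
# run time, e.g. (hypothetical guard, not shown in this snippet):
#
#     if not have_cv:
#         raise ImportError('cviewer is not installed')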


def ntwks_to_matrices(in_files, edge_key):
    first = nx.read_gpickle(in_files[0])
    files = len(in_files)
    nodes = len(first.nodes())
    matrix = np.zeros((nodes, nodes, files))
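    # 'matrix' stacks one node-by-node connectivity matrix per input network
    # along the third axis, i.e. matrix[:, :, i] corresponds to in_files[i].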
    for idx, name in enumerate(in_files):
        graph = nx.read_gpickle(name)
        for u, v, d in graph.edges(data=True):
            graph[u][v]['weight'] = d[edge_key]  # Use the requested edge attribute as the edge weight
Example #22
0
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""Utility routines for workflow graphs
"""

from copy import deepcopy
from glob import glob
from collections import defaultdict
import os
import re

import numpy as np
from nipype.utils.misc import package_check
from nipype.external import six

package_check("networkx", "1.3")

import networkx as nx

from ..utils.filemanip import fname_presuffix, FileNotFoundError, filename_to_list, get_related_files
from ..utils.misc import create_function_from_source, str2bool
from ..interfaces.base import CommandLine, isdefined, Undefined, InterfaceResult
from ..interfaces.utility import IdentityInterface
from ..utils.provenance import ProvStore, pm, nipype_ns, get_id

from .. import logging, config

logger = logging.getLogger("workflow")

try:
    dfs_preorder = nx.dfs_preorder
Example #23
0
# -*- coding: utf-8 -*-
from nipype.interfaces.base import (TraitedSpec, BaseInterface,
                                    BaseInterfaceInputSpec, File, isdefined,
                                    traits)
from nipype.utils.filemanip import split_filename
import os.path as op
import nibabel as nb, nibabel.trackvis as trk
from nipype.utils.misc import package_check
import warnings

from ... import logging
iflogger = logging.getLogger('interface')

try:
    package_check('dipy')
    from dipy.tracking.utils import density_map
except Exception, e:
    warnings.warn('dipy not installed')


class TrackDensityMapInputSpec(TraitedSpec):
    in_file = File(exists=True,
                   mandatory=True,
                   desc='The input TrackVis track file')
    voxel_dims = traits.List(traits.Float,
                             minlen=3,
                             maxlen=3,
                             desc='The size of each voxel in mm.')
    data_dims = traits.List(traits.Int,
                            minlen=3,
                            maxlen=3,
Example #24
0
from nipype.interfaces.base import (
    CommandLineInputSpec,
    TraitedSpec,
    File,
    Directory,
    traits,
    isdefined,
    BaseInterface,
)
from nipype.interfaces.utility import Merge, Split, Function, Rename

import nipype.interfaces.io as nio  # Data i/o
import nipype.pipeline.engine as pe  # pypeline engine

from nipype.utils.misc import package_check

package_check("numpy", "1.3", "tutorial1")
package_check("scipy", "0.7", "tutorial1")
package_check("networkx", "1.0", "tutorial1")
package_check("IPython", "0.10", "tutorial1")

from BRAINSConstellationDetector import *
from BRAINSABC import *
from BRAINSDemonWarp import *
from BRAINSFit import *
from BRAINSMush import *
from BRAINSResample import *
from BRAINSROIAuto import *

import os, sys, string, shutil, glob, re

## HACK:  This should be more elegant, and should use a class
Example #25
0
"""

import os, os.path as op
import datetime
import string
import warnings
import networkx as nx

from nipype.interfaces.base import (BaseInterface, BaseInterfaceInputSpec, traits,
                                    File, TraitedSpec, InputMultiPath, isdefined)
from nipype.utils.filemanip import split_filename
from nipype.utils.misc import package_check

have_cfflib = True
try:
    package_check('cfflib')
except Exception, e:
    have_cfflib = False
else:
    import cfflib as cf


class CFFConverterInputSpec(BaseInterfaceInputSpec):
    graphml_networks = InputMultiPath(File(exists=True), desc='list of graphML networks')
    gpickled_networks = InputMultiPath(File(exists=True), desc='list of gpickled Networkx graphs')

    gifti_surfaces = InputMultiPath(File(exists=True), desc='list of GIFTI surfaces')
    gifti_labels = InputMultiPath(File(exists=True), desc='list of GIFTI labels')
    nifti_volumes = InputMultiPath(File(exists=True), desc='list of NIFTI volumes')
    tract_files = InputMultiPath(File(exists=True), desc='list of Trackvis fiber files')
Example #26
0
File: tracks.py  Project: B-Rich/nipype
# -*- coding: utf-8 -*-
from nipype.interfaces.base import (TraitedSpec, BaseInterface, BaseInterfaceInputSpec,
                                    File, isdefined, traits)
from nipype.utils.filemanip import split_filename
import os.path as op
import nibabel as nb, nibabel.trackvis as trk
from nipype.utils.misc import package_check
import warnings

from ... import logging
iflogger = logging.getLogger('interface')

try:
    package_check('dipy')
    from dipy.tracking.utils import density_map
except Exception, e:
    warnings.warn('dipy not installed')


class TrackDensityMapInputSpec(TraitedSpec):
    in_file = File(exists=True, mandatory=True,
                   desc='The input TrackVis track file')
    voxel_dims = traits.List(traits.Float, minlen=3, maxlen=3,
                             desc='The size of each voxel in mm.')
    data_dims = traits.List(traits.Int, minlen=3, maxlen=3,
                            desc='The size of the image in voxels.')
    out_filename = File('tdi.nii', usedefault=True,
                        desc='The output filename for the track density map (NIfTI image)')

class TrackDensityMapOutputSpec(TraitedSpec):
    out_file = File(exists=True)
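# Hypothetical usage sketch (assumes the interface built on these specs is
# nipype's TrackDensityMap and that a 'tracks.trk' file is available):
#
#     tdi = TrackDensityMap(in_file='tracks.trk',
#                           voxel_dims=[2.0, 2.0, 2.0],
#                           data_dims=[91, 109, 91])
#     result = tdi.run()   # writes the density map to 'tdi.nii' by default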
Example #27
0
from nipype.interfaces.base import (OutputMultiPath, isdefined)
from nipype.utils.filemanip import split_filename
import os, os.path as op
import numpy as np
import networkx as nx
import scipy.io as sio
import pickle
from nipype.utils.misc import package_check
import warnings

from ... import logging
iflogger = logging.getLogger('interface')

have_cmp = True
try:
    package_check('cmp')
except Exception, e:
    have_cmp = False
else:
    import cmp


def read_unknown_ntwk(ntwk):
    if not isinstance(ntwk, nx.classes.graph.Graph):
        path, name, ext = split_filename(ntwk)
        if ext == '.pck':
            ntwk = nx.read_gpickle(ntwk)
        elif ext == '.graphml':
            ntwk = nx.read_graphml(ntwk)
    return ntwk
Example #28
0
# -*- coding: utf-8 -*-
from nipype.interfaces.base import (TraitedSpec, BaseInterface, BaseInterfaceInputSpec,
                                    File, isdefined, traits)
from nipype.utils.filemanip import split_filename
import os.path as op
import nibabel as nb, nibabel.trackvis as trk
from nipype.utils.misc import package_check
import warnings

from ... import logging
iflogger = logging.getLogger('interface')

have_dipy = True
try:
    package_check('dipy', version='0.6.0')
except Exception, e:
    have_dipy = False
else:
    from dipy.tracking.utils import density_map


class TrackDensityMapInputSpec(TraitedSpec):
    in_file = File(exists=True, mandatory=True,
                   desc='The input TrackVis track file')
    voxel_dims = traits.List(traits.Float, minlen=3, maxlen=3,
                             desc='The size of each voxel in mm.')
    data_dims = traits.List(traits.Int, minlen=3, maxlen=3,
                            desc='The size of the image in voxels.')
    out_filename = File('tdi.nii', usedefault=True,
                        desc='The output filename for the track density map (NIfTI image)')

class TrackDensityMapOutputSpec(TraitedSpec):
    out_file = File(exists=True)
Example #29
0
import nipype.interfaces.freesurfer as fs    # freesurfer
import nipype.interfaces.mrtrix as mrtrix
import nipype.algorithms.misc as misc
import nipype.interfaces.cmtk as cmtk
import nipype.interfaces.dipy as dipy
import inspect
import os, os.path as op                      # system functions
from nipype.workflows.dmri.fsl.dti import create_eddy_correct_pipeline
from nipype.workflows.dmri.camino.connectivity_mapping import select_aparc_annot
from nipype.utils.misc import package_check
import warnings
from nipype.workflows.dmri.connectivity.nx import create_networkx_pipeline, create_cmats_to_csv_pipeline
from nipype.workflows.smri.freesurfer import create_tessellation_flow

try:
    package_check('cmp')
except Exception, e:
    warnings.warn('cmp not installed')
else:
    import cmp

"""
This needs to point to the freesurfer subjects directory (Recon-all must have been run on subj1 from the FSL course data)
Alternatively, the reconstructed subject data can be downloaded from:

	* http://dl.dropbox.com/u/315714/subj1.zip

"""

subjects_dir = op.abspath(op.join(op.curdir,'./subjects'))
fs.FSCommand.set_default_subjects_dir(subjects_dir)
Example #30
0
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""

Interfaces to functionality from nitime for time-series analysis of fMRI data

- nitime.analysis.CoherenceAnalyzer: coherence/coherency analysis
- nitime.fmri.io
- nitime.viz.drawmatrix_channels

"""

import numpy as np
import tempfile
from nipype.utils.misc import package_check
package_check('nitime')
package_check('matplotlib')


from nipype.interfaces.base import (TraitedSpec, File, InputMultiPath,
                                    OutputMultiPath, Undefined, traits,
                                    BaseInterface, isdefined,
                                    BaseInterfaceInputSpec)

from nipype.utils.filemanip import fname_presuffix

import nitime.analysis as nta
from nitime.timeseries import TimeSeries
import nitime.viz as viz
    
class CoherenceAnalyzerInputSpec(BaseInterfaceInputSpec):
Example #31
0
import os

import nibabel as nb
import numpy as np

from nipype.utils.misc import package_check
package_check('nipy')
from nipy.labs.mask import compute_mask
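# compute_mask derives a binary brain mask from a mean EPI volume by
# thresholding its intensity histogram; the m/M inputs below control the
# histogram fractions to discard, and cc keeps only the largest connected
# component.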

from nipype.interfaces.base import (TraitedSpec, BaseInterface, traits,
                                    BaseInterfaceInputSpec, isdefined, File)

class ComputeMaskInputSpec(BaseInterfaceInputSpec):
    mean_volume = File(exists=True, mandatory=True,
                       desc="mean EPI image, used to compute the threshold for the mask")
    reference_volume = File(exists=True,
                            desc="reference volume used to compute the mask. "
                                 "If none is given, the mean volume is used.")
    m = traits.Float(desc="lower fraction of the histogram to be discarded")
    M = traits.Float(desc="upper fraction of the histogram to be discarded")
    cc = traits.Bool(desc="if True, only the largest connected component is kept")


class ComputeMaskOutputSpec(TraitedSpec):
    brain_mask = File(exists=True)

class ComputeMask(BaseInterface):
    input_spec = ComputeMaskInputSpec
    output_spec = ComputeMaskOutputSpec
    
    def _run_interface(self, runtime):
        
        args = {}
        for key in [k for k,_ in self.inputs.items() if k not in BaseInterfaceInputSpec().trait_names()]:
Example #32
0
from nipype.utils.filemanip import split_filename
import os.path as op
import nibabel as nb
import numpy as np
from nipype.utils.misc import package_check
import warnings

from multiprocessing import Process, Pool, cpu_count, pool, Manager, TimeoutError

from ... import logging

iflogger = logging.getLogger("interface")

have_dipy = True
try:
    package_check("dipy", version="0.8.0")
except Exception, e:
    have_dipy = False
else:
    import numpy as np
    from dipy.sims.voxel import multi_tensor, add_noise, all_tensor_evecs
    from dipy.core.gradients import gradient_table


class SimulateMultiTensorInputSpec(BaseInterfaceInputSpec):
    in_dirs = InputMultiPath(File(exists=True), mandatory=True, desc="list of fibers (principal directions)")
    in_frac = InputMultiPath(File(exists=True), mandatory=True, desc=("volume fraction of each fiber"))
    in_vfms = InputMultiPath(File(exists=True), mandatory=True, desc="volume fractions of isotropic compartments")
    in_mask = File(exists=True, desc="mask to simulate data")

    diff_iso = traits.List(
Example #33
0
#"""Import necessary modules from nipype."""
# from nipype.utils.config import config
# config.set('logging', 'log_to_file', 'false')
# config.set_log_dir(os.getcwd())
#--config.set('logging', 'workflow_level', 'DEBUG')
#--config.set('logging', 'interface_level', 'DEBUG')
#--config.set('execution','remove_unnecessary_outputs','false')

import nipype.pipeline.engine as pe
import nipype.interfaces.io as nio

from nipype.interfaces.utility import IdentityInterface, Function

from nipype.utils.misc import package_check
# package_check('nipype', '5.4', 'tutorial1') ## HACK: Check nipype version
package_check('numpy', '1.3', 'tutorial1')
package_check('scipy', '0.7', 'tutorial1')
# THIS IS NOT REQUIRED package_check('matplotlib', '1.4', 'tutorial1')
package_check('networkx', '1.0', 'tutorial1')
package_check('IPython', '0.10', 'tutorial1')

from utilities.distributed import modify_qsub_args
from PipeLineFunctionHelpers import convertToList, FixWMPartitioning, AccumulateLikeTissuePosteriors
from PipeLineFunctionHelpers import UnwrapPosteriorImagesFromDictionaryFunction as flattenDict

from .WorkupT1T2LandmarkInitialization import CreateLandmarkInitializeWorkflow
from .WorkupT1T2TissueClassify import CreateTissueClassifyWorkflow
from .WorkupT1T2MALF import CreateMALFWorkflow
from .WorkupAddsonBrainStem import CreateBrainstemWorkflow

from utilities.misc import *
Example #34
0
   :style: UML
"""

from version import version as __version__

__status__   = 'alpha'
__url__     = 'http://nipy.org/'



# We require numpy 1.2 for our test suite.  If Tester fails to import,
# check the version of numpy the user has and inform them they need to
# upgrade.

from nipype.utils.misc import package_check
package_check('numpy', version='1.1')
import numpy as np
from distutils.version import LooseVersion
if LooseVersion(np.__version__) >= '1.2':
    from numpy.testing import Tester
else:
    from testing.numpytesting import Tester

class NipypeTester(Tester):
    def test(self, label='fast', verbose=1, extra_argv=None,
             doctests=False, coverage=False):
        # setuptools does a chmod +x on ALL python modules when it
        # installs.  By default, as a security measure, nose refuses to
        # import executable files.  To force nose to execute our tests, we
        # must supply the '--exe' flag.  List thread on this:
        # http://www.mail-archive.com/[email protected]/msg05009.html
Example #35
0
#"""Import necessary modules from nipype."""
# from nipype.utils.config import config
# config.set('logging', 'log_to_file', 'false')
# config.set_log_dir(os.getcwd())
#--config.set('logging', 'workflow_level', 'DEBUG')
#--config.set('logging', 'interface_level', 'DEBUG')
#--config.set('execution','remove_unnecessary_outputs','false')

import nipype.pipeline.engine as pe
import nipype.interfaces.io as nio

from nipype.interfaces.utility import IdentityInterface, Function

from nipype.utils.misc import package_check
# package_check('nipype', '5.4', 'tutorial1') ## HACK: Check nipype version
package_check('numpy', '1.3', 'tutorial1')
package_check('scipy', '0.7', 'tutorial1')
# THIS IS NOT REQUIRED package_check('matplotlib', '1.4', 'tutorial1')
package_check('networkx', '1.0', 'tutorial1')
package_check('IPython', '0.10', 'tutorial1')

from utilities.distributed import modify_qsub_args
from PipeLineFunctionHelpers import convertToList, FixWMPartitioning, AccumulateLikeTissuePosteriors
from PipeLineFunctionHelpers import UnwrapPosteriorImagesFromDictionaryFunction as flattenDict

from WorkupT1T2LandmarkInitialization import CreateLandmarkInitializeWorkflow
from WorkupT1T2TissueClassify import CreateTissueClassifyWorkflow
from WorkupT1T2MALF import CreateMALFWorkflow
from WorkupAddsonBrainStem import CreateBrainstemWorkflow

from utilities.misc import *
Example #36
0
A pipeline to perform TBSS.
"""
"""
Tell python where to find the appropriate functions.
"""
import nipype.interfaces.io as nio  # Data i/o
import nipype.interfaces.fsl as fsl  # fsl
import nipype.interfaces.utility as util  # utility
import nipype.pipeline.engine as pe  # pypeline engine
import os  # system functions
from nipype.workflows.fsl import tbss
"""
Confirm package dependencies installed.
"""
from nipype.utils.misc import package_check
package_check('numpy', '1.3', 'tbss_test')
package_check('scipy', '0.7', 'tbss_test')
package_check('networkx', '1.0', 'tbss_test')
package_check('IPython', '0.10', 'tbss_test')
"""
Specify the related directories
"""
dataDir = '/nfs/s2/dticenter/data4test/tbss/mydata'
workingdir = '/nfs/s2/dticenter/data4test/tbss/tbss_test_workingdir'
subject_list = [
    'S0001', 'S0005', 'S0036', 'S0038', 'S0085', 'S0099', 'S0004', 'S0032',
    'S0037', 'S0057', 'S0098'
]
"""
Here we get the FA list including all the subjects.
"""