Example #1
File: base.py Project: schwarty/nipype
 def _get_sorteddict(self, object, dictwithhash=False):
     if isinstance(object, dict):
         out = {}
         for key, val in sorted(object.items()):
             if isdefined(val):
                 out[key] = self._get_sorteddict(val, dictwithhash)
     elif isinstance(object, (list, tuple)):
         out = []
         for val in object:
             if isdefined(val):
                 out.append(self._get_sorteddict(val, dictwithhash))
         if isinstance(object, tuple):
             out = tuple(out)
     else:
         if isdefined(object):
             if isinstance(object, str) and os.path.isfile(object):
                 if config.get('execution', 'hash_method').lower() == 'timestamp':
                     hash = hash_timestamp(object)
                 elif config.get('execution', 'hash_method').lower() == 'content':
                     hash = hash_infile(object)
                 else:
                     raise Exception("Unknown hash method: %s" % config.get('execution', 'hash_method'))
                 if dictwithhash:
                     out = (object, hash)
                 else:
                     out = hash
             elif isinstance(object, float):
                  out = '%.10f' % object
             else:
                 out = object
     return out
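
The dispatch above hinges on the hash_method configuration option: 'content' hashes the file's bytes, while 'timestamp' hashes cheap metadata such as size and modification time. As a rough illustration only, the two strategies could look like the sketch below; content_hash and timestamp_hash are hypothetical stand-ins, not nipype's actual hash_infile/hash_timestamp implementations.

import hashlib
import os

def content_hash(path, chunk_size=8192):
    # Hypothetical stand-in for a content-based hash: stable while the bytes
    # are unchanged, but requires reading the whole file.
    md5 = hashlib.md5()
    with open(path, 'rb') as fp:
        for chunk in iter(lambda: fp.read(chunk_size), b''):
            md5.update(chunk)
    return md5.hexdigest()

def timestamp_hash(path):
    # Hypothetical stand-in for a timestamp-based hash: cheap, but changes
    # whenever the file is touched, even if the content is identical.
    stat = os.stat(path)
    key = '%s-%s' % (stat.st_size, stat.st_mtime)
    return hashlib.md5(key.encode()).hexdigest()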
Example #2
File: base.py Project: agramfort/nipype
    def _get_sorteddict(self, object, dictwithhash=False, hash_method=None, hash_files=True):
        if isinstance(object, dict):
            out = {}
            for key, val in sorted(object.items()):
                if isdefined(val):
                    out[key] = self._get_sorteddict(val, dictwithhash, hash_method=hash_method, hash_files=hash_files)
        elif isinstance(object, (list, tuple)):
            out = []
            for val in object:
                if isdefined(val):
                    out.append(self._get_sorteddict(val, dictwithhash, hash_method=hash_method, hash_files=hash_files))
            if isinstance(object, tuple):
                out = tuple(out)
        else:
            if isdefined(object):
                if hash_files and isinstance(object, str) and os.path.isfile(object):
                    if hash_method is None:
                        hash_method = config.get("execution", "hash_method")

                    if hash_method.lower() == "timestamp":
                        hash = hash_timestamp(object)
                    elif hash_method.lower() == "content":
                        hash = hash_infile(object)
                    else:
                        raise Exception("Unknown hash method: %s" % hash_method)
                    if dictwithhash:
                        out = (object, hash)
                    else:
                        out = hash
                elif isinstance(object, float):
                    out = "%.10f" % object
                else:
                    out = object
        return out
Example #3
File: base.py Project: IBIC/nipype
    def run(self, **inputs):
        """Execute this interface.

        This interface will not raise an exception if runtime.returncode is
        non-zero.

        Parameters
        ----------
        inputs : allows the interface settings to be updated

        Returns
        -------
        results : an InterfaceResult object containing a copy of the instance
        that was executed, provenance information and, if successful, results
        """
        self.inputs.set(**inputs)
        self._check_mandatory_inputs()
        interface = self.__class__
        # initialize provenance tracking
        env = deepcopy(dict(os.environ))
        runtime = Bunch(cwd=os.getcwd(),
                        returncode=None,
                        duration=None,
                        environ=env,
                        hostname=gethostname())
        t = time()
        try:
            runtime = self._run_interface(runtime)
            runtime.duration = time() - t
            results = InterfaceResult(interface, runtime,
                                      inputs=self.inputs.get_traitsfree())
            results.outputs = self.aggregate_outputs(results.runtime)
        except Exception as e:
            if len(e.args) == 0:
                e.args = ("",)

            message = "\nInterface %s failed to run." % self.__class__.__name__

            if config.has_option('logging', 'interface_level') and config.get('logging', 'interface_level').lower() == 'debug':
                inputs_str = "Inputs:" + str(self.inputs) + "\n"
            else:
                inputs_str = ''

            if len(e.args) == 1 and isinstance(e.args[0], str):
                e.args = (e.args[0] + " ".join([message, inputs_str]),)
            else:
                e.args += (message, )
                if inputs_str != '':
                    e.args += (inputs_str, )

            #exception raising inhibition for special cases
            if hasattr(self.inputs, 'ignore_exception') and \
            isdefined(self.inputs.ignore_exception) and \
            self.inputs.ignore_exception:
                import traceback
                runtime.traceback = traceback.format_exc()
                runtime.traceback_args = e.args
                return InterfaceResult(interface, runtime)
            else:
                raise
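
The tail of this method means a caller can trade the raise for a captured traceback. A minimal usage sketch, assuming a concrete interface class SomeInterface whose input spec defines ignore_exception (the class name is hypothetical):

node = SomeInterface(ignore_exception=True)   # SomeInterface is hypothetical
result = node.run()                           # failures are recorded, not raised
if getattr(result.runtime, 'traceback', None):
    print(result.runtime.traceback)           # inspect the captured traceback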
Example #4
File: base.py Project: agramfort/nipype
 def _hash_infile(self, adict, key):
     """ Inject file hashes into adict[key]"""
     stuff = adict[key]
     if not is_container(stuff):
         stuff = [stuff]
     file_list = []
     for afile in stuff:
         if is_container(afile):
             hashlist = self._hash_infile({"infiles": afile}, "infiles")
             hash = [val[1] for val in hashlist]
         else:
             if config.get("execution", "hash_method").lower() == "timestamp":
                 hash = hash_timestamp(afile)
             elif config.get("execution", "hash_method").lower() == "content":
                 hash = hash_infile(afile)
             else:
                 raise Exception("Unknown hash method: %s" % config.get("execution", "hash_method"))
         file_list.append((afile, hash))
     return file_list
Example #5
def test_TraitedSpec_withFile():
    tmp_infile = setup_file()
    tmpd, nme = os.path.split(tmp_infile)
    yield assert_true, os.path.exists(tmp_infile)
    class spec2(nib.TraitedSpec):
        moo = nib.File(exists=True)
        doo = nib.traits.List(nib.File(exists=True))
    infields = spec2(moo=tmp_infile, doo=[tmp_infile])
    if config.get('execution', 'hash_method').lower() == 'content':
        yield assert_equal, infields.hashval[1], '8c227fb727c32e00cd816c31d8fea9b9'
    teardown_file(tmpd)
Example #6
File: base.py Project: agramfort/nipype
 def __init__(self, command=None, **inputs):
     super(CommandLine, self).__init__(**inputs)
     self._environ = None
     if not hasattr(self, "_cmd"):
         self._cmd = None
     if self.cmd is None and command is None:
         raise Exception("Missing command")
     if command:
         self._cmd = command
     try:
         display_var = config.get("execution", "display_variable")
         self.inputs.environ["DISPLAY"] = display_var
     except NoOptionError:
         pass
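
The DISPLAY handling above only takes effect when display_variable is present in the configuration; an absent option is silently ignored. A standalone sketch of the same pattern with a plain ConfigParser (not nipype's config wrapper):

from configparser import ConfigParser, NoOptionError

cfg = ConfigParser()
cfg.add_section('execution')        # section exists, option does not
environ = {}
try:
    environ['DISPLAY'] = cfg.get('execution', 'display_variable')
except NoOptionError:
    pass                            # leave DISPLAY untouched when unset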
Example #7
def test_caching():
    temp_dir = mkdtemp(prefix='test_memory_')
    old_rerun = config.get('execution', 'stop_on_first_rerun')
    try:
        # Prevent rerun to check that evaluation is computed only once
        config.set('execution', 'stop_on_first_rerun', 'true')
        mem = Memory(temp_dir)
        first_nb_run = nb_runs
        results = mem.cache(SideEffectInterface)(input1=2, input2=1)
        assert_equal(nb_runs, first_nb_run + 1)
        assert_equal(results.outputs.output1, [1, 2])
        results = mem.cache(SideEffectInterface)(input1=2, input2=1)
        # Check that the node hasn't been rerun
        assert_equal(nb_runs, first_nb_run + 1)
        assert_equal(results.outputs.output1, [1, 2])
        results = mem.cache(SideEffectInterface)(input1=1, input2=1)
        # Check that the node has been rerun
        assert_equal(nb_runs, first_nb_run + 2)
        assert_equal(results.outputs.output1, [1, 1])
    finally:
        rmtree(temp_dir)
        config.set('execution', 'stop_on_first_rerun', old_rerun)
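
The try/finally that saves and restores stop_on_first_rerun is the part worth imitating; the same idea as a reusable context manager (a standalone sketch, not part of nipype):

from contextlib import contextmanager

@contextmanager
def config_override(cfg, section, option, value):
    """Temporarily set a config option, restoring the previous value on exit."""
    old = cfg.get(section, option)
    cfg.set(section, option, value)
    try:
        yield
    finally:
        cfg.set(section, option, old)

# usage: with config_override(config, 'execution', 'stop_on_first_rerun', 'true'): ...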
Example #8
from nipype.utils.config import config
import matplotlib
matplotlib.use(config.get("execution", "matplotlib_backend"))
from vis import Overlay, PsMerge, Ps2Pdf, PlotRealignemntParameters
from threshold import ThresholdGGMM, CreateTopoFDRwithGGMM, ThresholdGMM, ThresholdFDR
from simgen import SimulationGenerator
from resampling import CalculateNonParametricFWEThreshold, CalculateProbabilityFromSamples, CalculateFDRQMap
from bootstrapping import BootstrapTimeSeries, PermuteTimeSeries
from bedpostx_particle_reader import Particle2Trackvis
from annotate_tracks import AnnotateTracts
from icc import ICC

import numpy as np

def estimate_fdr_and_fnr(true_pattern, exp_result):
    false_positives = sum(exp_result[true_pattern != 1] != 0)
    false_negatives = sum(exp_result[true_pattern != 0] == 0)
    all_positives = np.sum(exp_result != 0)
    all_negatives = np.sum(exp_result == 0)
    if all_positives == 0:
        fdr = 0
    else:
        fdr = float(false_positives)/float(all_positives)
        
    if all_negatives == 0:
        fnr = 0
    else:
        fnr = float(false_negatives)/float(all_negatives)
    return (fdr, fnr)
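
On a toy pattern the arithmetic is easy to verify by hand: one false call in each direction, out of two detections and two predicted negatives, gives both rates as 0.5:

true_pattern = np.array([0, 0, 1, 1])
exp_result = np.array([1, 0, 1, 0])   # one false positive, one false negative
fdr, fnr = estimate_fdr_and_fnr(true_pattern, exp_result)
# 1 false positive / 2 detections = 0.5; 1 miss / 2 predicted negatives = 0.5
assert (fdr, fnr) == (0.5, 0.5)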
Example #9
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:

import logging
import logging.handlers
import os

from nipype.utils.config import config
import sys

# Sets up logging for pipeline and nodewrapper execution
LOG_FILENAME = os.path.join(config.get('logging','log_directory'),
                            'pypeline.log')
logging.basicConfig(stream=sys.stdout)
logger = logging.getLogger('workflow')
fmlogger = logging.getLogger('filemanip')
iflogger = logging.getLogger('interface')
hdlr = logging.handlers.RotatingFileHandler(LOG_FILENAME,
                                            maxBytes=int(config.get('logging',
                                                                    'log_size')),
                                            backupCount=int(config.get('logging',
                                                                       'log_rotate')))
formatter = logging.Formatter(fmt='%(asctime)s,%(msecs)d %(name)-2s '\
                                  '%(levelname)-2s:\n\t %(message)s',
                              datefmt='%y%m%d-%H:%M:%S')
hdlr.setFormatter(formatter)
logger.addHandler(hdlr)
logger.setLevel(logging.getLevelName(config.get('logging','workflow_level')))
fmlogger.addHandler(hdlr)
fmlogger.setLevel(logging.getLevelName(config.get('logging','filemanip_level')))
iflogger.addHandler(hdlr)
iflogger.setLevel(logging.getLevelName(config.get('logging','interface_level')))
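
Client modules do not import the handler; they fetch the configured loggers by name, and records propagate both to the rotating file and, via basicConfig, to stdout. For example:

wf_logger = logging.getLogger('workflow')   # same logger object configured above
wf_logger.info('workflow started')          # written to stdout and pypeline.log,
                                            # subject to the configured workflow_level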
Example #10
def copyfile(originalfile, newfile, copy=False, create_new=False):
    """Copy or symlink ``originalfile`` to ``newfile``.

    Parameters
    ----------
    originalfile : str
        full path to original file
    newfile : str
        full path to new file
    copy : bool
        specifies whether to copy or symlink files (default=False);
        symlinking is only attempted on POSIX systems
    create_new : bool
        if True, never overwrite an existing ``newfile``; append a
        ``_cNNNN`` counter to the filename instead (default=False)

    Returns
    -------
    None

    """
    newhash = None
    orighash = None
    fmlogger.debug(newfile)

    if create_new:
        while os.path.exists(newfile):
            base, fname, ext = split_filename(newfile)
            s = re.search('_c[0-9]{4}$', fname)
            i = 0
            if s:
                i = int(s.group()[2:]) + 1
                fname = fname[:-6] + "_c%04d" % i
            else:
                fname += "_c%04d" % i
            newfile = base + os.sep + fname + ext
    elif os.path.exists(newfile):
        if config.get('execution', 'hash_method').lower() == 'timestamp':
            newhash = hash_timestamp(newfile)
        elif config.get('execution', 'hash_method').lower() == 'content':
            newhash = hash_infile(newfile)
        fmlogger.debug("File: %s already exists,%s, copy:%d" \
                           % (newfile, newhash, copy))
    #the following seems unnecessary
    #if os.name is 'posix' and copy:
    #    if os.path.lexists(newfile) and os.path.islink(newfile):
    #        os.unlink(newfile)
    #        newhash = None
    if os.name == 'posix' and not copy:
        if os.path.lexists(newfile):
            if config.get('execution', 'hash_method').lower() == 'timestamp':
                orighash = hash_timestamp(originalfile)
            elif config.get('execution', 'hash_method').lower() == 'content':
                orighash = hash_infile(originalfile)
            fmlogger.debug('Original hash: %s, %s'%(originalfile, orighash))
            if newhash != orighash:
                os.unlink(newfile)
        if (newhash is None) or (newhash != orighash):
            os.symlink(originalfile, newfile)
    else:
        if newhash:
            if config.get('execution', 'hash_method').lower() == 'timestamp':
                orighash = hash_timestamp(originalfile)
            elif config.get('execution', 'hash_method').lower() == 'content':
                orighash = hash_infile(originalfile)
        if (newhash is None) or (newhash != orighash):
            try:
                fmlogger.debug("Copying File: %s->%s" \
                                  % (newfile, originalfile))
                shutil.copyfile(originalfile, newfile)
            except shutil.Error as e:
                fmlogger.warning(str(e))
        else:
            pass
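
A usage sketch of the three modes; the paths are illustrative, not from the source:

copyfile('/data/sub01/anat.nii', '/scratch/anat.nii')                   # symlink on POSIX
copyfile('/data/sub01/anat.nii', '/scratch/anat.nii', copy=True)        # byte-for-byte copy
copyfile('/data/sub01/anat.nii', '/scratch/anat.nii', create_new=True)  # writes anat_c0000.nii
                                                                        # if anat.nii exists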
Example #11
 def _gen_regress(self, i_onsets, i_durations, i_amplitudes, nscans):
     """Generates a regressor for a sparse/clustered-sparse acquisition
     """
     bplot = False
     if isdefined(self.inputs.save_plot) and self.inputs.save_plot:
         bplot=True
         import matplotlib
         matplotlib.use(config.get("execution", "matplotlib_backend"))
         import matplotlib.pyplot as plt
     TR = np.round(self.inputs.time_repetition*1000)  # in ms
     if self.inputs.time_acquisition:
         TA = np.round(self.inputs.time_acquisition*1000) # in ms
     else:
         TA = TR # in ms
     nvol = self.inputs.volumes_in_cluster
     SCANONSET = np.round(self.inputs.scan_onset*1000)
     total_time = TR*(nscans-nvol)/nvol + TA*nvol + SCANONSET
     SILENCE = TR-TA*nvol
     dt = TA / 10.
     durations  = np.round(np.array(i_durations)*1000)
     if len(durations) == 1:
         durations = durations*np.ones((len(i_onsets)))
     onsets = np.round(np.array(i_onsets)*1000)
     dttemp = gcd(TA, gcd(SILENCE, TR))
     if dt < dttemp:
         if dttemp % dt != 0:
             dt = gcd(dttemp, dt)
     if dt < 1:
         raise Exception("Time multiple less than 1 ms")
     iflogger.info("Setting dt = %d ms\n" % dt)
     npts = int(total_time/dt)
     times = np.arange(0, total_time, dt)*1e-3
     timeline = np.zeros((npts))
     timeline2 = np.zeros((npts))
     if isdefined(self.inputs.model_hrf) and self.inputs.model_hrf:
         hrf = spm_hrf(dt*1e-3)
     reg_scale = 1.0
     if self.inputs.scale_regressors:
         boxcar = np.zeros(int(50. * 1e3 / dt))
         if self.inputs.stimuli_as_impulses:
             boxcar[int(1. * 1e3 / dt)] = 1.0
             reg_scale = float(TA/dt)
         else:
             boxcar[int(1. * 1e3 / dt):int(2. * 1e3 / dt)] = 1.0
         if isdefined(self.inputs.model_hrf) and self.inputs.model_hrf:
             response = np.convolve(boxcar, hrf)
             reg_scale = 1./response.max()
             iflogger.info('response sum: %.4f max: %.4f'%(response.sum(), response.max()))
         iflogger.info('reg_scale: %.4f'%reg_scale)
     for i, t in enumerate(onsets):
         idx = int(t/dt)
         if i_amplitudes:
             if len(i_amplitudes)>1:
                 timeline2[idx] = i_amplitudes[i]
             else:
                 timeline2[idx] = i_amplitudes[0]
         else:
             timeline2[idx] = 1
         if bplot:
             plt.subplot(4, 1, 1)
             plt.plot(times, timeline2)
         if not self.inputs.stimuli_as_impulses:
             if durations[i] == 0:
                 durations[i] = TA*nvol
             stimdur = np.ones((int(durations[i]/dt)))
             timeline2 = np.convolve(timeline2, stimdur)[0:len(timeline2)]
         timeline += timeline2
         timeline2[:] = 0
     if bplot:
         plt.subplot(4, 1, 2)
         plt.plot(times, timeline)
     if isdefined(self.inputs.model_hrf) and self.inputs.model_hrf:
         timeline = np.convolve(timeline, hrf)[0:len(timeline)]
         if isdefined(self.inputs.use_temporal_deriv) and self.inputs.use_temporal_deriv:
             #create temporal deriv
             timederiv = np.concatenate(([0], np.diff(timeline)))
     if bplot:
         plt.subplot(4, 1, 3)
         plt.plot(times, timeline)
         if isdefined(self.inputs.use_temporal_deriv) and self.inputs.use_temporal_deriv:
             plt.plot(times, timederiv)
     # sample timeline
     timeline2 = np.zeros((npts))
     reg = []
     regderiv = []
     for i, trial in enumerate(np.arange(nscans) // nvol):
         scanstart = int((SCANONSET + trial*TR + (i%nvol)*TA)/dt)
         #print total_time/dt, SCANONSET, TR, TA, scanstart, trial, i%2, int(TA/dt)
         scanidx = scanstart+np.arange(int(TA/dt))
         timeline2[scanidx] = np.max(timeline)
         reg.insert(i, np.mean(timeline[scanidx])*reg_scale)
         if isdefined(self.inputs.use_temporal_deriv) and self.inputs.use_temporal_deriv:
             regderiv.insert(i, np.mean(timederiv[scanidx])*reg_scale)
     if isdefined(self.inputs.use_temporal_deriv) and self.inputs.use_temporal_deriv:
         iflogger.info('orthogonalizing derivative w.r.t. main regressor')
         regderiv = orth(reg, regderiv)
     if bplot:
         plt.subplot(4, 1, 3)
         plt.plot(times, timeline2)
         plt.subplot(4, 1, 4)
         plt.bar(np.arange(len(reg)), reg, width=0.5)
         plt.savefig('sparse.png')
         plt.savefig('sparse.svg')
     if regderiv:
         return [reg, regderiv]
     else:
         return reg
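
The dt selection near the top of this method shrinks the sampling step until it divides the acquisition timing evenly. With integer milliseconds and one volume per cluster the same logic reduces to math.gcd; this is a standalone illustration, while the snippet itself relies on a project-local gcd that also accepts the float values used there:

import math

TR, TA = 3000, 2000                            # repetition / acquisition time, ms
SILENCE = TR - TA                              # 1000 ms of silence per TR (nvol = 1)
dt = TA // 10                                  # initial step: 200 ms
dttemp = math.gcd(TA, math.gcd(SILENCE, TR))   # coarsest step dividing all three: 1000 ms
if dt < dttemp and dttemp % dt != 0:
    dt = math.gcd(dttemp, dt)                  # not needed here: 1000 % 200 == 0
assert dt == 200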
Example #12
# Sets up logging for pipeline and nodewrapper execution
LOG_FILENAME = 'pypeline.log'
logging.basicConfig()
logger = logging.getLogger('workflow')
fmlogger = logging.getLogger('filemanip')
iflogger = logging.getLogger('interface')
hdlr = logging.handlers.RotatingFileHandler(LOG_FILENAME,
                                            maxBytes=256000,
                                            backupCount=4)
formatter = logging.Formatter(fmt='%(asctime)s,%(msecs)d %(name)-2s '\
                                  '%(levelname)-2s:\n\t %(message)s',
                              datefmt='%y%m%d-%H:%M:%S')
hdlr.setFormatter(formatter)
logger.addHandler(hdlr)
logger.setLevel(logging.getLevelName(config.get('logging','workflow_level')))
fmlogger.addHandler(hdlr)
fmlogger.setLevel(logging.getLevelName(config.get('logging','filemanip_level')))
iflogger.addHandler(hdlr)
iflogger.setLevel(logging.getLevelName(config.get('logging','interface_level')))

class WorkflowBase(object):
    """ Define common attributes and functions for workflows and nodes
    """

    def __init__(self, name=None, base_dir=None,
                 overwrite=False, **kwargs):
        """ Initialize base parameters of a workflow or node

        Parameters
        ----------