Example #1
    def fetcher():
        files = {}
        for i, (f, n) in enumerate(zip(remote_fnames, local_fnames)):
            files[n] = (baseurl + f, md5_list[i] if md5_list is not None else None)
        fetch_data(files, folder, data_size)

        if msg is not None:
            print(msg)
        if unzip:
            for f in local_fnames:
                split_ext = os.path.splitext(f)
                if split_ext[-1] == ".gz" or split_ext[-1] == ".bz2":
                    if os.path.splitext(split_ext[0])[-1] == ".tar":
                        ar = tarfile.open(pjoin(folder, f))
                        ar.extractall(path=folder)
                        ar.close()
                    else:
                        raise ValueError("File extension is not recognized")
                elif split_ext[-1] == ".zip":
                    z = zipfile.ZipFile(pjoin(folder, f), "r")
                    z.extractall(folder)
                    z.close()
                else:
                    raise ValueError("File extension is not recognized")

        return files, folder
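
Note that `remote_fnames`, `local_fnames`, `baseurl`, `md5_list`, `folder`, `data_size`, `msg`, `unzip` and `fetch_data` are free variables here, so this `fetcher` only works as a closure returned by an enclosing factory. A minimal, self-contained sketch of such a factory (the factory name, its signature and the stub downloader are assumptions, not the original API):

import os
import urllib.request
from os.path import join as pjoin


def fetch_data(files, folder, data_size=None):
    """Stub downloader: `files` maps local filename -> (url, md5 or None)."""
    os.makedirs(folder, exist_ok=True)
    for local_name, (url, _md5) in files.items():
        urllib.request.urlretrieve(url, pjoin(folder, local_name))


def make_fetcher(folder, baseurl, remote_fnames, local_fnames,
                 md5_list=None, data_size=None, msg=None, unzip=False):
    """Return a zero-argument `fetcher` that closes over the parameters above."""
    def fetcher():
        files = {}
        for i, (f, n) in enumerate(zip(remote_fnames, local_fnames)):
            files[n] = (baseurl + f,
                        md5_list[i] if md5_list is not None else None)
        fetch_data(files, folder, data_size)
        if msg is not None:
            print(msg)
        # unzip handling omitted; see the example above
        return files, folder
    return fetcher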
Example #2
def create_test_dataset(project, label_type='class'):
    """Create and yield test labeled dataset, then delete.

    Params
    ------
    project : `models.Project` instance
        The project under which to create test dataset.
    label_type  : str
        String indicating whether data labels are class names ('class')
        for classification, or numerical values for regression (anything other
        than 'class'). Defaults to 'class'.

    """
    if label_type == 'class':
        header = pjoin(os.path.dirname(__file__),
                       'data', 'asas_training_subset_classes.dat')
    else:
        header = pjoin(os.path.dirname(__file__),
                       'data', 'asas_training_subset_targets.dat')
    tarball = pjoin(os.path.dirname(__file__),
                    'data', 'asas_training_subset.tar.gz')
    header = shutil.copy2(header, cfg['paths']['upload_folder'])
    tarball = shutil.copy2(tarball, cfg['paths']['upload_folder'])
    ts_paths = data_management.parse_and_store_ts_data(
        tarball, cfg['paths']['ts_data_folder'], header)
    d = m.Dataset.add(name='test_ds', project=project, file_uris=ts_paths)
    d.save()
    try:
        yield d
    finally:
        d.delete_instance()
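
Since `create_test_dataset` yields exactly once and cleans up in the `finally` block, callers typically wrap it with `contextlib.contextmanager` (or register it as a pytest fixture). A small sketch of the context-manager form (the `project` object is assumed to exist already):

from contextlib import contextmanager

test_dataset = contextmanager(create_test_dataset)

# with test_dataset(project) as ds:
#     ...  # exercise code against the temporary dataset `ds`
# `ds.delete_instance()` has run by the time the block exits.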
Example #3
def path_info_run(subj, run):
    """Construct path information dict for current subject/run.

    Parameters
    ----------
    subj : int
        subject number (0..15 inclusive)
    run : int
        run number (1..4 inclusive).

    Returns
    -------
    path_dict : dict
        a dict with all the necessary path-related keys, including 'rootdir',
        and 'design', where 'design' can have values 'event' or 'block'
        depending on which type of run this was for subject no `subj` and run no
        `run`
    """
    path_dict = {'subj': subj, 'run': run}
    if exists(pjoin(DATADIR, "fiac_%(subj)02d",
                    "block", "initial_%(run)02d.csv") % path_dict):
        path_dict['design'] = 'block'
    else:
        path_dict['design'] = 'event'
    rootdir = pjoin(DATADIR, "fiac_%(subj)02d", "%(design)s") % path_dict
    path_dict['rootdir'] = rootdir
    return path_dict
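
A detail worth noting: the `%` operator is applied to the string returned by `pjoin`, so the `%(subj)02d` / `%(design)s` placeholders survive the join untouched and are only substituted afterwards. A tiny illustration (the `DATADIR` value is made up):

from os.path import join as pjoin

DATADIR = '/data/fiac'   # assumed location
path_dict = {'subj': 3, 'run': 1, 'design': 'block'}
template = pjoin(DATADIR, 'fiac_%(subj)02d', '%(design)s')
print(template)              # /data/fiac/fiac_%(subj)02d/%(design)s
print(template % path_dict)  # /data/fiac/fiac_03/block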
Example #4
def results_table(path_dict):
    """ Return precalculated results images for subject info in `path_dict`

    Parameters
    ----------
    path_dict : dict
        containing key 'rootdir'

    Returns
    -------
    rtab : dict
        dict with keys given by run directories for this subject, values being a
        list with filenames of effect and sd images.
    """
    # Which runs correspond to this design type?
    rootdir = path_dict['rootdir']
    runs = filter(lambda f: isdir(pjoin(rootdir, f)),
                  ['results_%02d' % i for i in range(1,5)] )

    # Find out which contrasts have t-statistics,
    # storing the filenames for reading below

    results = {}

    for rundir in runs:
        rundir = pjoin(rootdir, rundir)
        for condir in listdir(rundir):
            fname_effect = abspath(pjoin(rundir, condir, 'effect.nii'))
            fname_sd = abspath(pjoin(rundir, condir, 'sd.nii'))
            if exists(fname_effect) and exists(fname_sd):
                results.setdefault(condir, []).append([fname_effect,
                                                       fname_sd])
    return results
Example #5
def test_copy_data_to_tmp_dir():
    """Test copy data to temp dir"""
    tmp_dir_path = cft.make_tmp_dir()
    copied_file_path1 = pjoin(tmp_dir_path,
                              "custom_feature_defs.py")
    copied_file_path2 = pjoin(tmp_dir_path,
                              "features_already_known.pkl")

    feats_known_dict = {"feat1": 0.215, "feat2": 0.311}
    ts_datafile = pjoin(DATA_PATH, "dotastro_215153.dat")
    t, m, e = ctt.parse_ts_data(ts_datafile)
    feats_known_dict['t'] = t
    feats_known_dict['m'] = m
    feats_known_dict['e'] = e

    for fpath in [copied_file_path1, copied_file_path2]:
        if os.path.exists(fpath):
            os.remove(fpath)
    assert(not os.path.exists(copied_file_path1))
    cft.copy_data_to_tmp_dir(tmp_dir_path, pjoin(DATA_PATH, "testfeature1.py"),
                             feats_known_dict)
    assert(os.path.exists(copied_file_path1))
    assert(os.path.exists(copied_file_path2))
    with open(copied_file_path2, "rb") as f:
        unpickled_dict = pickle.load(f)
    npt.assert_equal(unpickled_dict, feats_known_dict)
    shutil.rmtree(tmp_dir_path, ignore_errors=True)
Example #6
def get_experiment_initial(path_dict):
    """Get the record arrays for the experimental/initial designs.

    Parameters
    ----------
    path_dict : dict
        containing key 'rootdir', 'run', 'subj'

    Returns
    -------
    experiment, initial : Two record arrays.

    """
    # The following two lines read in the .csv files
    # and return recarrays, with fields
    # experiment: ['time', 'sentence', 'speaker']
    # initial: ['time', 'initial']

    rootdir = path_dict['rootdir']
    if not exists(pjoin(rootdir, "experiment_%(run)02d.csv") % path_dict):
        e = "can't find design for subject=%(subj)d,run=%(subj)d" % path_dict
        raise IOError(e)

    experiment = csv2rec(pjoin(rootdir, "experiment_%(run)02d.csv") % path_dict)
    initial = csv2rec(pjoin(rootdir, "initial_%(run)02d.csv") % path_dict)

    return experiment, initial
Example #7
def locate_cuda():
    """Locate the CUDA environment on the system

    Returns a dict with keys 'home', 'nvcc', 'include', and 'lib64'
    and values giving the absolute path to each directory.

    Starts by looking for the CUDAHOME env variable. If not found, everything
    is based on finding 'nvcc' in the PATH.
    """

    # first check if the CUDAHOME env variable is in use
    if 'CUDAHOME' in os.environ:
        home = os.environ['CUDAHOME']
        nvcc = pjoin(home, 'bin', 'nvcc')
    else:
        # otherwise, search the PATH for NVCC
        default_path = pjoin(os.sep, 'usr', 'local', 'cuda', 'bin')
        nvcc = find_in_path('nvcc', os.environ['PATH'] + os.pathsep + default_path)
        if nvcc is None:
            raise EnvironmentError('The nvcc binary could not be '
                'located in your $PATH. Either add it to your path, or set $CUDAHOME')
        home = os.path.dirname(os.path.dirname(nvcc))

    cudaconfig = {'home':home, 'nvcc':nvcc,
                  'include': pjoin(home, 'include'),
                  'lib64': pjoin(home, 'lib64')}
    for k, v in cudaconfig.items():
        if not os.path.exists(v):
            raise EnvironmentError('The CUDA %s path could not be located in %s' % (k, v))

    return cudaconfig
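
The returned dict is usually consumed when declaring a compiled extension; a hedged sketch of that use (the module name and source file are placeholders, and actually compiling `.cu` sources still requires a customized compiler wrapping nvcc, which is not shown):

from setuptools import Extension

CUDA = locate_cuda()
ext = Extension(
    'gpu_kernels',                      # placeholder module name
    sources=['gpu_kernels.cu'],         # placeholder source file
    libraries=['cudart'],
    include_dirs=[CUDA['include']],
    library_dirs=[CUDA['lib64']],
    runtime_library_dirs=[CUDA['lib64']],
)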
Example #8
def group_analysis(design, contrast):
    """ Compute group analysis effect, t, sd for `design` and `contrast`

    Saves to disk in 'group' analysis directory

    Parameters
    ----------
    design : {'block', 'event'}
    contrast : str
        contrast name
    """
    array = np.array # shorthand
    # Directory where output will be written
    odir = futil.ensure_dir(futil.DATADIR, 'group', design, contrast)

    # Which subjects have this (contrast, design) pair?
    subj_con_dirs = futil.subj_des_con_dirs(design, contrast)
    if len(subj_con_dirs) == 0:
        raise ValueError('No subjects for %s, %s' % (design, contrast))

    # Assemble effects and sds into 4D arrays
    sds = []
    Ys = []
    for s in subj_con_dirs:
        sd_img = load_image(pjoin(s, "sd.nii"))
        effect_img = load_image(pjoin(s, "effect.nii"))
        sds.append(sd_img.get_data())
        Ys.append(effect_img.get_data())
    sd = array(sds)
    Y = array(Ys)

    # This function estimates the ratio of the fixed effects variance
    # (sum(1/sd**2, 0)) to the estimated random effects variance
    # (sum(1/(sd+rvar)**2, 0)) where rvar is the random effects variance.

    # The EM algorithm used is described in:
    #
    # Worsley, K.J., Liao, C., Aston, J., Petre, V., Duncan, G.H.,
    #    Morales, F., Evans, A.C. (2002). 'A general statistical
    #    analysis for fMRI data'. NeuroImage, 15:1-15
    varest = onesample.estimate_varatio(Y, sd)
    random_var = varest['random']

    # XXX - if we have a smoother, use
    # random_var = varest['fixed'] * smooth(varest['ratio'])

    # Having estimated the random effects variance (and possibly smoothed it),
    # the corresponding estimate of the effect and its variance is computed and
    # saved.

    # This is the coordmap we will use
    coordmap = futil.load_image_fiac("fiac_00","wanatomical.nii").coordmap

    adjusted_var = sd**2 + random_var
    adjusted_sd = np.sqrt(adjusted_var)

    results = onesample.estimate_mean(Y, adjusted_sd) 
    for n in ['effect', 'sd', 't']:
        im = api.Image(results[n], copy(coordmap))
        save_image(im, pjoin(odir, "%s.nii" % n))
Example #9
def tst_passthrough(src_dir, mnt_dir, cache_timeout):
    name = name_generator()
    src_name = pjoin(src_dir, name)
    mnt_name = pjoin(mnt_dir, name)
    assert name not in os.listdir(src_dir)
    assert name not in os.listdir(mnt_dir)
    with open(src_name, 'w') as fh:
        fh.write('Hello, world')
    assert name in os.listdir(src_dir)
    if cache_timeout:
        safe_sleep(cache_timeout+1)
    assert name in os.listdir(mnt_dir)
    assert os.stat(src_name) == os.stat(mnt_name)

    name = name_generator()
    src_name = pjoin(src_dir, name)
    mnt_name = pjoin(mnt_dir, name)
    assert name not in os.listdir(src_dir)
    assert name not in os.listdir(mnt_dir)
    with open(mnt_name, 'w') as fh:
        fh.write('Hello, world')
    assert name in os.listdir(src_dir)
    if cache_timeout:
        safe_sleep(cache_timeout+1)
    assert name in os.listdir(mnt_dir)
    assert os.stat(src_name) == os.stat(mnt_name)
Example #10
def create_virtualenv(update_on_change=False):
    helpers.create_venv(
        VIRTUALENV, settings.PYREPO, pjoin(ZAMBONI, "requirements/prod.txt"), update_on_change=update_on_change
    )

    if settings.LOAD_TESTING:
        helpers.pip_install_reqs(pjoin(ZAMBONI, "requirements/load.txt"))
Example #11
def tst_open_read(src_dir, mnt_dir):
    name = name_generator()
    with open(pjoin(src_dir, name), 'wb') as fh_out, \
         open(TEST_FILE, 'rb') as fh_in:
        shutil.copyfileobj(fh_in, fh_out)

    assert filecmp.cmp(pjoin(mnt_dir, name), TEST_FILE, False)
Example #12
def tst_link(mnt_dir):
    name1 = pjoin(mnt_dir, name_generator())
    name2 = pjoin(mnt_dir, name_generator())
    shutil.copyfile(TEST_FILE, name1)
    assert filecmp.cmp(name1, TEST_FILE, False)

    fstat1 = os.lstat(name1)
    assert fstat1.st_nlink == 1

    os.link(name1, name2)

    fstat1 = os.lstat(name1)
    fstat2 = os.lstat(name2)
    for attr in ('st_mode', 'st_dev', 'st_uid', 'st_gid',
                 'st_size', 'st_atime', 'st_mtime', 'st_ctime'):
        assert getattr(fstat1, attr) == getattr(fstat2, attr)
    assert os.path.basename(name2) in os.listdir(mnt_dir)
    assert filecmp.cmp(name1, name2, False)

    os.unlink(name2)

    assert os.path.basename(name2) not in os.listdir(mnt_dir)
    with pytest.raises(FileNotFoundError):
        os.lstat(name2)

    os.unlink(name1)
Example #13
    def resample( self, in_path, idx_out, Ylm_out, doMergeB0 ) :
        if doMergeB0:
            nS = 1+self.scheme.dwi_count
            merge_idx = np.hstack((self.scheme.b0_idx[0],self.scheme.dwi_idx))
        else:
            nS = self.scheme.nS
            merge_idx = np.arange(nS)
        KERNELS = {}
        KERNELS['model'] = self.id
        KERNELS['wmr'] = np.zeros( (len(self.Rs),181,181,nS,), dtype=np.float32 )
        KERNELS['wmh'] = np.zeros( (len(self.ICVFs),181,181,nS,), dtype=np.float32 )
        KERNELS['iso'] = np.zeros( (len(self.d_ISOs),nS,), dtype=np.float32 )

        nATOMS = len(self.Rs) + len(self.ICVFs) + len(self.d_ISOs)
        progress = ProgressBar( n=nATOMS, prefix="   ", erase=True )

        # Cylinder(s)
        for i in range(len(self.Rs)):
            lm = np.load( pjoin( in_path, 'A_%03d.npy'%progress.i ) )
            KERNELS['wmr'][i,:,:,:] = amico.lut.resample_kernel( lm, self.scheme.nS, idx_out, Ylm_out, False )[:,:,merge_idx]
            progress.update()

        # Zeppelin(s)
        for i in range(len(self.ICVFs)):
            lm = np.load( pjoin( in_path, 'A_%03d.npy'%progress.i ) )
            KERNELS['wmh'][i,:,:,:] = amico.lut.resample_kernel( lm, self.scheme.nS, idx_out, Ylm_out, False )[:,:,merge_idx]
            progress.update()

        # Ball(s)
        for i in range(len(self.d_ISOs)):
            lm = np.load( pjoin( in_path, 'A_%03d.npy'%progress.i ) )
            KERNELS['iso'][i,:] = amico.lut.resample_kernel( lm, self.scheme.nS, idx_out, Ylm_out, True )[merge_idx]
            progress.update()

        return KERNELS
Example #14
    def generate( self, out_path, aux, idx_in, idx_out ):
        scheme_high = amico.lut.create_high_resolution_scheme( self.scheme, b_scale = 1 )
        protocolHR = self.scheme2noddi( scheme_high )

        nATOMS = len(self.IC_ODs)*len(self.IC_VFs) + 1
        progress = ProgressBar( n=nATOMS, prefix="   ", erase=True )

        # Coupled contributions
        IC_KAPPAs = 1 / np.tan(self.IC_ODs*np.pi/2)
        for kappa in IC_KAPPAs:
            signal_ic = self.synth_meas_watson_SH_cyl_neuman_PGSE( np.array([self.dPar*1E-6, 0, kappa]), protocolHR['grad_dirs'], np.squeeze(protocolHR['gradient_strength']), np.squeeze(protocolHR['delta']), np.squeeze(protocolHR['smalldel']), np.array([0,0,1]), 0 )

            for v_ic in self.IC_VFs:
                dPerp = self.dPar*1E-6 * (1 - v_ic)
                signal_ec = self.synth_meas_watson_hindered_diffusion_PGSE( np.array([self.dPar*1E-6, dPerp, kappa]), protocolHR['grad_dirs'], np.squeeze(protocolHR['gradient_strength']), np.squeeze(protocolHR['delta']), np.squeeze(protocolHR['smalldel']), np.array([0,0,1]) )

                signal = v_ic*signal_ic + (1-v_ic)*signal_ec
                lm = amico.lut.rotate_kernel( signal, aux, idx_in, idx_out, False )
                np.save( pjoin( out_path, 'A_%03d.npy'%progress.i) , lm )
                progress.update()

        # Isotropic
        signal = self.synth_meas_iso_GPD( self.dIso*1E-6, protocolHR)
        lm = amico.lut.rotate_kernel( signal, aux, idx_in, idx_out, True )
        np.save( pjoin( out_path, 'A_%03d.npy'%progress.i) , lm )
        progress.update()
Example #15
    def resample( self, in_path, idx_out, Ylm_out, doMergeB0 ) :
        if doMergeB0:
            nS = 1+self.scheme.dwi_count
            merge_idx = np.hstack((self.scheme.b0_idx[0],self.scheme.dwi_idx))
        else:
            nS = self.scheme.nS
            merge_idx = np.arange(nS)
        KERNELS = {}
        KERNELS['model'] = self.id
        KERNELS['D']     = np.zeros( (len(self.d_perps),181,181,nS), dtype=np.float32 )
        KERNELS['CSF']   = np.zeros( (len(self.d_isos),nS), dtype=np.float32 )

        nATOMS = len(self.d_perps) + len(self.d_isos)
        progress = ProgressBar( n=nATOMS, prefix="   ", erase=True )

        # Tensor compartment(s)
        for i in range(len(self.d_perps)):
            lm = np.load( pjoin( in_path, 'A_%03d.npy'%progress.i ) )
            KERNELS['D'][i,...] = amico.lut.resample_kernel( lm, self.scheme.nS, idx_out, Ylm_out, False )[:,:,merge_idx]
            progress.update()

        # Isotropic compartment(s)
        for i in range(len(self.d_isos)):
            lm = np.load( pjoin( in_path, 'A_%03d.npy'%progress.i ) )
            KERNELS['CSF'][i,...] = amico.lut.resample_kernel( lm, self.scheme.nS, idx_out, Ylm_out, True )[merge_idx]
            progress.update()

        return KERNELS
Example #16
    def generate( self, out_path, aux, idx_in, idx_out ) :
        scheme_high = amico.lut.create_high_resolution_scheme( self.scheme, b_scale=1 )
        gtab = gradient_table( scheme_high.b, scheme_high.raw[:,0:3] )

        nATOMS = 1 + len(self.ICVFs) + len(self.d_ISOs)
        progress = ProgressBar( n=nATOMS, prefix="   ", erase=True )

        # Stick
        signal = single_tensor( gtab, evals=[0, 0, self.d_par] )
        lm = amico.lut.rotate_kernel( signal, aux, idx_in, idx_out, False )
        np.save( pjoin( out_path, 'A_001.npy' ), lm )
        progress.update()

        # Zeppelin(s)
        for d in [ self.d_par*(1.0-ICVF) for ICVF in self.ICVFs] :
            signal = single_tensor( gtab, evals=[d, d, self.d_par] )
            lm = amico.lut.rotate_kernel( signal, aux, idx_in, idx_out, False )
            np.save( pjoin( out_path, 'A_%03d.npy'%progress.i ), lm )
            progress.update()

        # Ball(s)
        for d in self.d_ISOs :
            signal = single_tensor( gtab, evals=[d, d, d] )
            lm = amico.lut.rotate_kernel( signal, aux, idx_in, idx_out, True )
            np.save( pjoin( out_path, 'A_%03d.npy'%progress.i ), lm )
            progress.update()
Example #17
def test_datasource_or_bomber():
    pkg_def = dict(
        relpath = 'pkg')
    with TemporaryDirectory() as tmpdir:
        nibd.get_data_path = lambda : [tmpdir]
        ds = datasource_or_bomber(pkg_def)
        yield (assert_raises,
               DataError,
               getattr,
               ds,
               'get_filename')
        pkg_dir = pjoin(tmpdir, 'pkg')
        os.mkdir(pkg_dir)
        tmpfile = pjoin(pkg_dir, 'config.ini')
        with open(tmpfile, 'wt') as fobj:
            fobj.write('[DEFAULT]\n')
            fobj.write('version = 0.2\n')
        ds = datasource_or_bomber(pkg_def)
        fn = ds.get_filename('some_file.txt')
        # check that versioning works
        pkg_def['min version'] = '0.2'
        ds = datasource_or_bomber(pkg_def) # OK
        fn = ds.get_filename('some_file.txt')
        pkg_def['min version'] = '0.3'
        ds = datasource_or_bomber(pkg_def) # not OK
        yield (assert_raises,
               DataError,
               getattr,
               ds,
               'get_filename')
Example #18
def test_datasource():
    # Tests for DataSource
    pth = pjoin('some', 'path')
    ds = Datasource(pth)
    yield assert_equal, ds.get_filename('unlikeley'), pjoin(pth, 'unlikeley')
    yield (assert_equal, ds.get_filename('un','like','ley'),
           pjoin(pth, 'un','like','ley'))
Example #19
def get_skeleton(name='C1'):
    """ provide skeletons generated from Local Skeleton Clustering (LSC)

    Parameters
    -----------
    name : str, 'C1' or 'C3'

    Returns
    -------
    dix : dictionary

    Examples
    ---------
    >>> from dipy.data import get_skeleton
    >>> C=get_skeleton('C1')
    >>> len(C.keys())
    117
    >>> for c in C: break
    >>> sorted(C[c].keys())
    ['N', 'hidden', 'indices', 'most']
    """
    if name == 'C1':
        fname = pjoin(THIS_DIR, 'C1.pkl.gz')
    elif name == 'C3':
        fname = pjoin(THIS_DIR, 'C3.pkl.gz')
    else:
        raise ValueError("name must be 'C1' or 'C3'")
    return loads_compat(gzip.open(fname, 'rb').read())
Example #20
def locate_cuda():
    """
    Locate the CUDA environment on the system

    Returns a dict with keys 'home', 'nvcc', 'include', and 'lib64'
    and values giving the absolute path to each directory.

    Starts by looking for the CUDA_HOME env variable. If not found, everything
    is based on finding 'nvcc' in the PATH.
    """

    # first check if the CUDA_HOME env variable is in use
    if 'CUDA_HOME' in os.environ:
        home = os.environ['CUDA_HOME']
        nvcc = pjoin(home, 'bin', 'nvcc')
    else:
        # otherwise, search the PATH for NVCC
        nvcc = find_in_path('nvcc', os.environ['PATH'])
        if nvcc is None:
            print_warning('The nvcc binary could not be located in your $PATH. '
                          'add it to your path, or set $CUDA_HOME.')
            return False
            
        home = os.path.dirname(os.path.dirname(nvcc))

    cudaconfig = {'home':home, 'nvcc':nvcc,
                  'include': pjoin(home, 'include'),
                  'lib64': pjoin(home, 'lib64')}
    print "CUDA config:", cudaconfig
    for k, v in cudaconfig.iteritems():
        if not os.path.exists(v):
            print_warning('The CUDA %s path could not be located in %s' % (k, v))
            return False
    return cudaconfig
Example #21
def load_instruments(exclude=None):
    if exclude is None:
        exclude = []
    #paths = [pjoin(d, 'instruments') for d in get_conf_dirs(skip_module_dir=True)]
    paths = [pjoin(d, 'instruments') for d in get_conf_dirs(skip_module_dir=False)]
    # move last path (within the pyHegel module) as the first so users can't override those
    # Reverse paths so we load pyHegel internal first and let user override them if needed.
    paths = paths[::-1]
    loaded = {}
    from .instruments_registry import add_to_instruments
    for p in paths:
        filenames = glob.glob(pjoin(p, '*.py'))
        for f in filenames:
            name = os.path.basename(f)[:-3] # remove .py
            if name == '__init__' or name in exclude:
                continue
            if re.match(r'[A-Za-z_][A-Za-z0-9_]*\Z', name) is None:
                raise RuntimeError('Trying to load "%s" but the name is invalid (should only contain letters, numbers and _)'%
                                    f)
            fullname = INSTRUMENTS_BASE+'.'+name
            if fullname in loaded:
                print('Skipping loading "%s" because a module with that name is already loaded from %s' % (f, loaded[fullname]))
                continue
            # instead of imp.load_source, could do
            #  insert path in sys.path
            #   import (using importlib.import_module)
            #  remove inserted path
            # But that makes reloading more complicated
            module = imp.load_source(fullname, f)
            loaded[fullname] = f
            add_to_instruments(name)(module)
    return loaded
Example #22
def mrtrix_spherical_functions():
    """Spherical functions represented by spherical harmonic coefficients and
    evaluated on a discrete sphere.

    Returns
    -------
    func_coef : array (2, 3, 4, 45)
        Functions represented by the coefficients associated with the
        mrtrix spherical harmonic basis of order 8.
    func_discrete : array (2, 3, 4, 81)
        Functions evaluated on `sphere`.
    sphere : Sphere
        The discrete sphere, points on the surface of a unit sphere, used to
        evaluate the functions.

    Notes
    -----
    These coefficients were obtained by using the dwi2SH command of mrtrix.

    """
    func_discrete = load(pjoin(THIS_DIR, "func_discrete.nii.gz")).get_data()
    func_coef = load(pjoin(THIS_DIR, "func_coef.nii.gz")).get_data()
    gradients = np.loadtxt(pjoin(THIS_DIR, "sphere_grad.txt"))
    # gradients[0] and the first volume of func_discrete, 
    # func_discrete[..., 0], are associated with the b=0 signal.
    # gradients[:, 3] are the b-values for each gradient/volume.
    sphere = Sphere(xyz=gradients[1:, :3])
    return func_coef, func_discrete[..., 1:], sphere
Example #23
def make_profile(logger, build_store, artifacts, target_dir, virtuals, cfg):
    """

    Parameters
    ----------
    logger : Logger

    build_store : BuildStore

    artifacts : list of dict(id=..., before=...)
        Lists the artifacts to include together with constraints

    target_dir : str
        Target directory, must be non-existing or entirely empty
    """
    for artifact in artifacts:
        a_id_desc = shorten_artifact_id(artifact['id'])
        logger.info('Linking %s into %s' % (a_id_desc, target_dir))
        sub_logger = logger.get_sub_logger(a_id_desc)
        install_artifact_into_profile(sub_logger, build_store, artifact['id'], target_dir,
                                      virtuals, cfg)

    # make profile.json
    doc = {'artifacts': artifacts}
    profile_json = pjoin(target_dir, 'profile.json')
    with open(profile_json, 'w') as f:
        json.dump(doc, f, **json_formatting_options)
        f.write('\n')
    write_protect(profile_json)

    # marker file for use by launcher
    if os.path.exists(pjoin(target_dir, 'bin')):
        touch(pjoin(target_dir, 'bin', 'is-profile-bin'), readonly=True)
Example #24
    def save(self, path):
        savedir = smartutils.create_folder(pjoin(path, type(self).__name__))
        smartutils.save_dict_to_json_file(pjoin(savedir, "hyperparams.json"), self.hyperparameters)

        params = {param.name: param.get_value() for param in self.parameters}
        assert len(self.parameters) == len(params)  # Implies names are all unique.
        np.savez(pjoin(savedir, "params.npz"), **params)
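
A matching loader for the two files written above could look like the sketch below; it is an assumption that the class can be rebuilt from its saved hyperparameters, and plain `json`/`numpy` are used rather than any particular `smartutils` helper:

import json
import numpy as np
from os.path import join as pjoin


def load_model(path, model_cls):
    savedir = pjoin(path, model_cls.__name__)
    with open(pjoin(savedir, "hyperparams.json")) as f:
        hyperparameters = json.load(f)
    model = model_cls(**hyperparameters)        # assumes the constructor accepts these
    weights = np.load(pjoin(savedir, "params.npz"))
    for param in model.parameters:              # Theano-style shared variables, as in `save`
        param.set_value(weights[param.name])
    return model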
Example #25
def read_mni_template(contrast="T2"):
    """
    Read the MNI template from disk

    Parameters
    ----------
    contrast : list or string, optional
        Which of the contrast templates to read. Two contrasts are available:
        "T1" and "T2", so you can either enter one of these strings as input,
        or a list containing both of them.

    Returns
    -------
    list : contains the nibabel.Nifti1Image objects requested, according to the
        order they were requested in the input.

    Examples
    --------
    Get only the T2 file:
    >>> T2_nifti = read_mni_template("T2") # doctest: +SKIP
    Get both files in this order:
    >>> T1_nifti, T2_nifti = read_mni_template(["T1", "T2"]) # doctest: +SKIP
    """
    files, folder = fetch_mni_template()
    file_dict = {"T1": pjoin(folder, 'mni_icbm152_t1_tal_nlin_asym_09a.nii'),
                 "T2": pjoin(folder, 'mni_icbm152_t2_tal_nlin_asym_09a.nii')}
    if isinstance(contrast, str):
        return nib.load(file_dict[contrast])
    else:
        out_list = []
        for k in contrast:
            out_list.append(nib.load(file_dict[k]))
    return out_list
Example #26
    def _call_hook(self, hook_name, kwargs):
        # On Python 2, pytoml returns Unicode values (which is correct) but the
        # environment passed to check_call needs to contain string values. We
        # convert here by encoding using ASCII (the backend can only contain
        # letters, digits and _, . and : characters, and will be used as a
        # Python identifier, so non-ASCII content is wrong on Python 2 in
        # any case).
        if sys.version_info[0] == 2:
            build_backend = self.build_backend.encode('ASCII')
        else:
            build_backend = self.build_backend

        with tempdir() as td:
            compat.write_json({'kwargs': kwargs}, pjoin(td, 'input.json'),
                              indent=2)

            # Run the hook in a subprocess
            self._subprocess_runner(
                [sys.executable, _in_proc_script, hook_name, td],
                cwd=self.source_dir,
                extra_environ={'PEP517_BUILD_BACKEND': build_backend}
            )

            data = compat.read_json(pjoin(td, 'output.json'))
            if data.get('unsupported'):
                raise UnsupportedOperation
            if data.get('no_backend'):
                raise BackendUnavailable
            return data['return_val']
Example #27
    def testIOEvent(self):
        tempdir = tempfile.mkdtemp(prefix='pyrocko-model')
        fn = pjoin(tempdir, 'event.txt')
        e1 = model.Event(
            10., 20., 1234567890., 'bubu', region='taka tuka land',
            moment_tensor=moment_tensor.MomentTensor(strike=45., dip=90),
            magnitude=5.1, magnitude_type='Mw')
        guts.dump(e1, filename=fn)
        e2 = guts.load(filename=fn)
        assert e1.region == e2.region
        assert e1.name == e2.name
        assert e1.lat == e2.lat
        assert e1.lon == e2.lon
        assert e1.time == e2.time
        assert e1.region == e2.region
        assert e1.magnitude == e2.magnitude
        assert e1.magnitude_type == e2.magnitude_type
        assert e1.get_hash() == e2.get_hash()

        fn2 = pjoin(tempdir, 'events.txt')
        guts.dump_all([e1, e2], filename=fn2)

        with self.assertRaises(model.OneEventRequired):
            model.load_one_event(fn2)

        shutil.rmtree(tempdir)
Example #28
    def test_some_plots(self):
        assert_true(isdir(self.html_dir))

        def plot_file(num):
            return pjoin(self.html_dir, 'some_plots-{0}.png'.format(num))

        range_10, range_6, range_4 = [plot_file(i) for i in range(1, 4)]
        # Plot 5 is range(6) plot
        assert_true(file_same(range_6, plot_file(5)))
        # Plot 7 is range(4) plot
        assert_true(file_same(range_4, plot_file(7)))
        # Plot 11 is range(10) plot
        assert_true(file_same(range_10, plot_file(11)))
        # Plot 12 uses the old range(10) figure and the new range(6) figure
        assert_true(file_same(range_10, plot_file('12_00')))
        assert_true(file_same(range_6, plot_file('12_01')))
        # Plot 13 shows close-figs in action
        assert_true(file_same(range_4, plot_file(13)))
        # Plot 14 has included source
        with open(pjoin(self.html_dir, 'some_plots.html'), 'rb') as fobj:
            html_contents = fobj.read()
        assert_true(b'# Only a comment' in html_contents)
        # check plot defined in external file.
        assert_true(file_same(range_4, pjoin(self.html_dir, 'range4.png')))
        assert_true(file_same(range_6, pjoin(self.html_dir, 'range6.png')))
        # check if figure caption made it into html file
        assert_true(b'This is the caption for plot 15.' in html_contents)
Example #29
def get_conf_dirs_posix():
    xdg_config_home = os.environ.get('XDG_CONFIG_HOME', pjoin(USER_HOME, '.config'))
    xdg_config_dirs = os.environ.get('XDG_CONFIG_DIRS', '/etc/xdg').split(':')
    default_user = DEFAULT_USER_DIR
    default_system = pjoin('/etc', CONFIG_DIR)
    paths = []
    if isdir(xdg_config_home):
        # XDG
        xdg_user = pjoin(xdg_config_home, CONFIG_DIR)
        xdg_dirs = [pjoin(d, CONFIG_DIR) for d in xdg_config_dirs]
        if isdir(xdg_user):
            default_user = xdg_user
        default_user = get_env_user_dir(default_user)
        paths.append(default_user)
        for d in xdg_dirs:
            if isdir(d):
                paths.extend(xdg_dirs)
                break
        else:
            paths.append(default_system)
    else:
        default_user = get_env_user_dir(default_user)
        paths.append(default_user)
        paths.append(default_system)
    return paths
Example #30
def bundled_settings(debug):
    """settings for linking extensions against bundled libzmq"""
    settings = {}
    settings['libraries'] = []
    settings['library_dirs'] = []
    settings['include_dirs'] = [pjoin("bundled", "zeromq", "include")]
    settings['runtime_library_dirs'] = []
    # add pthread on freebsd
    # is this necessary?
    if sys.platform.startswith('freebsd'):
        settings['libraries'].append('pthread')
    elif sys.platform.startswith('win'):
        # link against libzmq in build dir:
        plat = distutils.util.get_platform()
        temp = 'temp.%s-%i.%i' % (plat, sys.version_info[0], sys.version_info[1])
        suffix = ''
        if sys.version_info >= (3,5):
            # Python 3.5 adds EXT_SUFFIX to libs
            ext_suffix = distutils.sysconfig.get_config_var('EXT_SUFFIX')
            suffix = os.path.splitext(ext_suffix)[0]


        if debug:
            suffix = '_d' + suffix
            release = 'Debug'
        else:
            release = 'Release'

        settings['libraries'].append('libzmq' + suffix)
        settings['library_dirs'].append(pjoin('build', temp, release, 'buildutils'))

    return settings
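
Each key of the returned dict corresponds to a `setuptools.Extension` keyword argument, so it is typically splatted straight into the extension definition; a sketch (the module name and source file are placeholders):

from setuptools import Extension

settings = bundled_settings(debug=False)
ext = Extension(
    'zmq.backend.cython._dummy',      # placeholder module name
    sources=['buildutils/dummy.c'],   # placeholder source file
    **settings                        # libraries, library_dirs, include_dirs, etc.
)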
Example #31

def approx_heaviside(s, eps):
    return 0.5 * (1 + (2 / math.pi) * torch.atan(s / eps))


root_dir = '/home/ubelix/data/medical-labeling'

out_size = 256

# img = io.imread(pjoin(root_dir, 'Dataset20/input-frames/frame_0030.png'))
# truth = io.imread(
#     pjoin(root_dir, 'Dataset20/ground_truth-frames/frame_0030.png'))
# p_x, p_y = 143, 132

img = io.imread(pjoin(root_dir, 'Dataset01/input-frames/frame_0150.png'))
truth = (io.imread(
    pjoin(root_dir, 'Dataset01/ground_truth-frames/frame_0150.png'))[..., 0] >
         0).astype(float)
p_x, p_y = 190, 100

# img = io.imread(pjoin(root_dir, 'Dataset30/input-frames/frame_0075.png'))[..., :3]
# truth = (io.imread(
#     pjoin(root_dir, 'Dataset30/ground_truth-frames/frame_0075.png'))[..., 0] > 0).astype(float)
# p_x, p_y = 150, 110

# p_x, p_y = 190, 100
# rr, cc = draw.ellipse(p_y, p_x, 40, 20, shape=(out_size, out_size), rotation=15)
# img = np.zeros((out_size, out_size, 3))
# img[rr, cc, ...] = 1
# truth = np.zeros((out_size, out_size))
Example #32
def update_model(sbml_file, directory, doc_fba=None):
    """
        Submodel for dynamically updating the metabolite count/concentration.
        This updates the ode model based on the FBA fluxes.
    """
    doc = builder.template_doc_update("ecoli")
    model = doc.getModel()
    update_notes = notes.format("""
        <h2>UPDATE submodel</h2>
        <p>Submodel for dynamically updating the metabolite count.
        This updates the ode model based on the FBA fluxes.</p>
        """)
    utils.set_model_info(model,
                         notes=update_notes,
                         creators=creators,
                         units=units,
                         main_units=main_units)

    # compartment
    compartment_id = "bioreactor"
    builder.create_dfba_compartment(model,
                                    compartment_id=compartment_id,
                                    unit_volume=UNIT_VOLUME,
                                    create_port=True)

    # dynamic species
    model_fba = doc_fba.getModel()

    # creates all the exchange reactions; biomass must be handled separately
    builder.create_dfba_species(model,
                                model_fba,
                                compartment_id=compartment_id,
                                unit_amount=UNIT_AMOUNT,
                                create_port=True)

    # FIXME: biomass via function
    mc.create_objects(model, [
        mc.Parameter(sid='cf_biomass',
                     value=1.0,
                     unit="g_per_mmol",
                     name="biomass conversion factor",
                     constant=True),
        mc.Species(sid='X',
                   initialAmount=0.001,
                   compartment='c',
                   name='biomass',
                   substanceUnit='g',
                   hasOnlySubstanceUnits=True,
                   conversionFactor='cf_biomass')
    ])

    # update reactions
    # FIXME: weight with X (biomass)
    builder.create_update_reactions(model,
                                    model_fba=model_fba,
                                    formula="-{}",
                                    unit_flux=UNIT_FLUX,
                                    modifiers=[])

    # write SBML file
    sbmlio.write_sbml(doc, filepath=pjoin(directory, sbml_file), validate=True)
Example #33
    def get_ext_generated_cpp_source(self, name):
        if sys.platform == 'win32':
            head, tail = os.path.split(name)
            return pjoin(head, tail + ".cpp")
        else:
            return pjoin(name + ".cpp")
Example #34
    def _run_cmake(self):
        # The directory containing this setup.py
        source = osp.dirname(osp.abspath(__file__))

        # The staging directory for the module being built
        build_temp = pjoin(os.getcwd(), self.build_temp)
        build_lib = os.path.join(os.getcwd(), self.build_lib)
        saved_cwd = os.getcwd()

        if not os.path.isdir(self.build_temp):
            self.mkpath(self.build_temp)

        # Change to the build directory
        with changed_dir(self.build_temp):
            # Detect if we built elsewhere
            if os.path.isfile('CMakeCache.txt'):
                cachefile = open('CMakeCache.txt', 'r')
                cachedir = re.search('CMAKE_CACHEFILE_DIR:INTERNAL=(.*)',
                                     cachefile.read()).group(1)
                cachefile.close()
                if (cachedir != build_temp):
                    return

            static_lib_option = ''

            cmake_options = [
                '-DPYTHON_EXECUTABLE=%s' % sys.executable,
                static_lib_option,
            ]

            if self.cmake_generator:
                cmake_options += ['-G', self.cmake_generator]
            if self.with_cuda:
                cmake_options.append('-DPYARROW_BUILD_CUDA=on')
            if self.with_parquet:
                cmake_options.append('-DPYARROW_BUILD_PARQUET=on')
            if self.with_static_parquet:
                cmake_options.append('-DPYARROW_PARQUET_USE_SHARED=off')
            if not self.with_static_boost:
                cmake_options.append('-DPYARROW_BOOST_USE_SHARED=on')
            else:
                cmake_options.append('-DPYARROW_BOOST_USE_SHARED=off')

            if self.with_plasma:
                cmake_options.append('-DPYARROW_BUILD_PLASMA=on')

            if self.with_tensorflow:
                cmake_options.append('-DPYARROW_USE_TENSORFLOW=on')

            if self.with_orc:
                cmake_options.append('-DPYARROW_BUILD_ORC=on')

            if self.with_gandiva:
                cmake_options.append('-DPYARROW_BUILD_GANDIVA=on')

            if len(self.cmake_cxxflags) > 0:
                cmake_options.append('-DPYARROW_CXXFLAGS={0}'.format(
                    self.cmake_cxxflags))

            if self.generate_coverage:
                cmake_options.append('-DPYARROW_GENERATE_COVERAGE=on')

            if self.bundle_arrow_cpp:
                cmake_options.append('-DPYARROW_BUNDLE_ARROW_CPP=ON')
                # ARROW-1090: work around CMake rough edges
                if 'ARROW_HOME' in os.environ and sys.platform != 'win32':
                    pkg_config = pjoin(os.environ['ARROW_HOME'], 'lib',
                                       'pkgconfig')
                    os.environ['PKG_CONFIG_PATH'] = pkg_config
                    del os.environ['ARROW_HOME']

            if self.bundle_boost:
                cmake_options.append('-DPYARROW_BUNDLE_BOOST=ON')

            cmake_options.append('-DCMAKE_BUILD_TYPE={0}'.format(
                self.build_type.lower()))

            if self.boost_namespace != 'boost':
                cmake_options.append('-DBoost_NAMESPACE={}'.format(
                    self.boost_namespace))

            extra_cmake_args = shlex.split(self.extra_cmake_args)

            build_tool_args = []
            if sys.platform == 'win32':
                if not is_64_bit:
                    raise RuntimeError('Not supported on 32-bit Windows')
            else:
                build_tool_args.append('--')
                if os.environ.get('PYARROW_BUILD_VERBOSE', '0') == '1':
                    cmake_options.append('-DCMAKE_VERBOSE_MAKEFILE=ON')
                if os.environ.get('PYARROW_PARALLEL'):
                    build_tool_args.append('-j{0}'.format(
                        os.environ['PYARROW_PARALLEL']))

            # Generate the build files
            print("-- Running cmake for pyarrow")
            self.spawn(['cmake'] + extra_cmake_args + cmake_options + [source])
            print("-- Finished cmake for pyarrow")

            # Do the build
            print("-- Running cmake --build for pyarrow")
            self.spawn(['cmake', '--build', '.', '--config', self.build_type] +
                       build_tool_args)
            print("-- Finished cmake --build for pyarrow")

            if self.inplace:
                # a bit hacky
                build_lib = saved_cwd

            # Move the libraries to the place expected by the Python build
            try:
                os.makedirs(pjoin(build_lib, 'pyarrow'))
            except OSError:
                pass

            if sys.platform == 'win32':
                build_prefix = ''
            else:
                build_prefix = self.build_type

            print('Bundling includes: ' + pjoin(build_prefix, 'include'))
            if os.path.exists(pjoin(build_lib, 'pyarrow', 'include')):
                shutil.rmtree(pjoin(build_lib, 'pyarrow', 'include'))
            shutil.move(pjoin(build_prefix, 'include'),
                        pjoin(build_lib, 'pyarrow'))

            # Move the built C-extension to the place expected by the Python
            # build
            self._found_names = []
            for name in self.CYTHON_MODULE_NAMES:
                built_path = self.get_ext_built(name)
                if not os.path.exists(built_path):
                    print('Did not find {0}'.format(built_path))
                    if self._failure_permitted(name):
                        print(
                            'Cython module {0} failure permitted'.format(name))
                        continue
                    raise RuntimeError('pyarrow C-extension failed to build:',
                                       os.path.abspath(built_path))

                cpp_generated_path = self.get_ext_generated_cpp_source(name)
                if not os.path.exists(cpp_generated_path):
                    raise RuntimeError('expected to find generated C++ file '
                                       'in {0!r}'.format(cpp_generated_path))

                # The destination path to move the generated C++ source to
                # (for Cython source coverage)
                cpp_path = pjoin(build_lib, self._get_build_dir(),
                                 os.path.basename(cpp_generated_path))
                if os.path.exists(cpp_path):
                    os.remove(cpp_path)

                # The destination path to move the built C extension to
                ext_path = pjoin(build_lib, self._get_cmake_ext_path(name))
                if os.path.exists(ext_path):
                    os.remove(ext_path)
                self.mkpath(os.path.dirname(ext_path))

                print('Moving generated C++ source', cpp_generated_path,
                      'to build path', cpp_path)
                shutil.move(cpp_generated_path, cpp_path)
                print('Moving built C-extension', built_path, 'to build path',
                      ext_path)
                shutil.move(built_path, ext_path)
                self._found_names.append(name)

                if os.path.exists(self.get_ext_built_api_header(name)):
                    shutil.move(
                        self.get_ext_built_api_header(name),
                        pjoin(os.path.dirname(ext_path), name + '_api.h'))

            if self.bundle_arrow_cpp:
                print(pjoin(build_lib, 'pyarrow'))
                move_shared_libs(build_prefix, build_lib, "arrow")
                move_shared_libs(build_prefix, build_lib, "arrow_python")
                if self.with_cuda:
                    move_shared_libs(build_prefix, build_lib, "arrow_gpu")
                if self.with_plasma:
                    move_shared_libs(build_prefix, build_lib, "plasma")
                if self.with_gandiva:
                    move_shared_libs(build_prefix, build_lib, "gandiva")
                if self.with_parquet and not self.with_static_parquet:
                    move_shared_libs(build_prefix, build_lib, "parquet")
                if not self.with_static_boost and self.bundle_boost:
                    move_shared_libs(build_prefix,
                                     build_lib,
                                     "{}_filesystem".format(
                                         self.boost_namespace),
                                     implib_required=False)
                    move_shared_libs(build_prefix,
                                     build_lib,
                                     "{}_system".format(self.boost_namespace),
                                     implib_required=False)
                    move_shared_libs(build_prefix,
                                     build_lib,
                                     "{}_regex".format(self.boost_namespace),
                                     implib_required=False)
                if sys.platform == 'win32':
                    # zlib uses zlib.dll for Windows
                    zlib_lib_name = 'zlib'
                    move_shared_libs(build_prefix,
                                     build_lib,
                                     zlib_lib_name,
                                     implib_required=False)

            if self.with_plasma:
                # Move the plasma store
                source = os.path.join(self.build_type, "plasma_store_server")
                target = os.path.join(build_lib, self._get_build_dir(),
                                      "plasma_store_server")
                shutil.move(source, target)
Example #35
import os
import logging as log
from setuptools import setup, Command
from setuptools.command.build_py import build_py
from setuptools.command.sdist import sdist
from glob import glob
from os.path import join as pjoin
from subprocess import check_call

log.basicConfig(level=log.DEBUG)
log.info('setup.py entered')
log.info('$PATH=%s' % os.environ['PATH'])

repo_root = os.path.dirname(os.path.abspath(__file__))
is_repo = os.path.exists(pjoin(repo_root, '.git'))

npm_path = os.pathsep.join([
    pjoin(repo_root, 'node_modules', '.bin'),
    os.environ.get("PATH", os.defpath),
])


def mtime(path):
    """shorthand for mtime"""
    return os.stat(path).st_mtime


def js_prerelease(command, strict=False):
    """decorator for building minified js/css prior to another command"""
    class DecoratedCommand(command):
Example #36
from contextlib import contextmanager
import os
from os.path import dirname, abspath, join as pjoin
import shutil
from subprocess import check_call
import sys
from tempfile import mkdtemp

from . import compat

_in_proc_script = pjoin(dirname(abspath(__file__)), '_in_process.py')


@contextmanager
def tempdir():
    td = mkdtemp()
    try:
        yield td
    finally:
        shutil.rmtree(td)


class BackendUnavailable(Exception):
    """Will be raised if the backend cannot be imported in the hook process."""


class UnsupportedOperation(Exception):
    """May be raised by build_sdist if the backend indicates that it can't."""


def default_subprocess_runner(cmd, cwd=None, extra_environ=None):
Example #37
def create_model(output_dir):
    """ Create all models.

    :return: directory where SBML files are located
    """
    directory = utils.versioned_directory(output_dir, version=settings.VERSION)

    # create sbml
    import time
    t_start = time.time()

    doc_fba = fba_model(settings.FBA_LOCATION, directory)
    t_fba = time.time()
    print('{:<10}: {:3.2f}'.format('fba', t_fba - t_start))

    bounds_model(settings.BOUNDS_LOCATION, directory, doc_fba=doc_fba)
    t_bounds = time.time()
    print('{:<10}: {:3.2f}'.format('bounds', t_bounds - t_fba))

    update_model(settings.UPDATE_LOCATION, directory, doc_fba=doc_fba)
    t_update = time.time()
    print('{:<10}: {:3.2f}'.format('update', t_update - t_bounds))

    emds = {
        "ecoli_fba": settings.FBA_LOCATION,
        "ecoli_bounds": settings.BOUNDS_LOCATION,
        "ecoli_update": settings.UPDATE_LOCATION,
    }

    # flatten top model
    top_model(settings.TOP_LOCATION, directory, emds, doc_fba=doc_fba)
    t_top = time.time()
    print('{:<10}: {:3.2f}'.format('top', t_top - t_update))

    comp.flattenSBMLFile(sbml_path=pjoin(directory, settings.TOP_LOCATION),
                         output_path=pjoin(directory,
                                           settings.FLATTENED_LOCATION))
    t_flat = time.time()
    print('{:<10}: {:3.2f}'.format('flat', t_flat - t_top))

    # create reports
    locations = [
        settings.FBA_LOCATION, settings.BOUNDS_LOCATION,
        settings.UPDATE_LOCATION, settings.TOP_LOCATION,
        settings.FLATTENED_LOCATION
    ]

    sbml_paths = [pjoin(directory, fname) for fname in locations]
    sbmlreport.create_reports(sbml_paths, directory, validate=False)

    # create sedml
    from sbmlutils.dfba.sedml import create_sedml
    sids = [
        'ac_e', 'acald_e', 'akg_e', 'co2_e', 'etoh_e', 'for_e', 'fru_e',
        'fum_e', 'glc__D_e', 'gln__L_e', 'glu__L_e', 'h_e', 'h2o_e',
        'lac__D_e', 'mal__L_e', 'nh4_e', 'o2_e', 'pi_e', 'pyr_e', 'succ_e', 'X'
    ]
    species_ids = ", ".join(sids)
    reaction_ids = ", ".join(['EX_{}'.format(sid) for sid in sids])
    create_sedml(settings.SEDML_LOCATION,
                 settings.TOP_LOCATION,
                 directory=directory,
                 dt=0.01,
                 tend=3.5,
                 species_ids=species_ids,
                 reaction_ids=reaction_ids)

    return directory
Example #38
def fba_model(sbml_file, directory):
    """ Create FBA submodel.
    """
    # Read the model
    fba_path = os.path.join(os.path.dirname(os.path.abspath(__file__)),
                            'data/ecoli_fba.xml')
    doc_fba = sbmlio.read_sbml(fba_path)

    # add comp
    doc_fba.enablePackage(
        "http://www.sbml.org/sbml/level3/version1/comp/version1", "comp", True)
    doc_fba.setPackageRequired("comp", True)

    # add notes
    model = doc_fba.getModel()
    fba_notes = notes.format("""DFBA FBA submodel.""")
    utils.set_model_info(model,
                         notes=fba_notes,
                         creators=None,
                         units=units,
                         main_units=main_units)

    # clip R_ reaction and M_ metabolite prefixes
    utils.clip_prefixes_in_model(model)

    # set id & framework
    model.setId('ecoli_fba')
    model.setName('ecoli (FBA)')
    model.setSBOTerm(comp.SBO_FLUX_BALANCE_FRAMEWORK)

    # add units and information
    for species in model.getListOfSpecies():
        species.setInitialConcentration(0.0)
        species.setHasOnlySubstanceUnits(False)
        species.setUnits(UNIT_AMOUNT)
    for compartment in model.getListOfCompartments():
        compartment.setUnits(UNIT_VOLUME)

    # The ATPM (ATP maintenance) reaction creates many problems in the DFBA,
    # mainly resulting in infeasible solutions when some metabolites run out.
    # ATP -> ADP is part of the biomass, so we set the lower bound to zero
    r_ATPM = model.getReaction('ATPM')
    r_ATPM_fbc = r_ATPM.getPlugin(builder.SBML_FBC_NAME)
    lb_id = r_ATPM_fbc.getLowerFluxBound()
    model.getParameter(lb_id).setValue(0.0)  # 8.39 before

    # make unique upper and lower bounds for exchange reaction
    if not biomass_weighting:
        builder.update_exchange_reactions(model=model, flux_unit=UNIT_FLUX)
    else:
        builder.update_exchange_reactions(model=model,
                                          flux_unit=UNIT_FLUX_PER_G)

    # add exchange reaction for biomass (X)
    # we are adding the biomass component to the biomass function and create an
    # exchange reaction for it
    r_biomass = model.getReaction('BIOMASS_Ecoli_core_w_GAM')

    # FIXME: refactor in function
    # FIXME: annotate biomass species (SBO for biomass missing)
    mc.create_objects(model, [
        mc.Parameter(sid='cf_X',
                     value=1.0,
                     unit="g_per_mmol",
                     name="biomass conversion factor",
                     constant=True),
        mc.Species(sid='X',
                   initialAmount=0.001,
                   compartment='c',
                   name='biomass',
                   substanceUnit='g',
                   hasOnlySubstanceUnits=True,
                   conversionFactor='cf_X')
    ])

    pr_biomass = r_biomass.createProduct()
    pr_biomass.setSpecies('X')
    pr_biomass.setStoichiometry(1.0)
    pr_biomass.setConstant(True)
    # FIXME: the flux units must fit to the species units.
    if not biomass_weighting:
        builder.create_exchange_reaction(model,
                                         species_id='X',
                                         flux_unit=UNIT_FLUX,
                                         exchange_type=builder.EXCHANGE_EXPORT)
    else:
        builder.create_exchange_reaction(model,
                                         species_id='X',
                                         flux_unit=UNIT_FLUX_PER_G,
                                         exchange_type=builder.EXCHANGE_EXPORT)
    # write SBML file
    sbmlio.write_sbml(doc_fba,
                      filepath=pjoin(directory, sbml_file),
                      validate=True)

    # Set kinetic laws to zero for kinetic simulation
    '''
    for reaction in model.getListOfReactions():
        ast_node = mc.ast_node_from_formula(model=model, formula='0 {}'.format(UNIT_FLUX))
        law = reaction.createKineticLaw()
        law.setMath(ast_node)
    '''

    return doc_fba
Example #39
def test_server_push_second_branch_with_new_commit_then_clone_partial_fetch(
        server_instance, repo, managed_tmpdir, array5by7, nMasterCommits,
        nMasterSamples, nDevCommits, nDevSamples):
    from hangar import Repository
    from hangar.records.summarize import list_history

    # Push master branch test
    masterCmtList = []
    co = repo.checkout(write=True)
    co.add_ndarray_column(name='writtenaset', shape=(5, 7), dtype=np.float32)
    for cIdx in range(nMasterCommits):
        if cIdx != 0:
            co = repo.checkout(write=True)
        masterSampList = []
        with co.columns['writtenaset'] as d:
            for prevKey in list(d.keys())[1:]:
                del d[prevKey]
            for sIdx in range(nMasterSamples):
                arr = np.random.randn(*array5by7.shape).astype(
                    np.float32) * 100
                d[str(sIdx)] = arr
                masterSampList.append(arr)
        cmt = co.commit(f'master commit number: {cIdx}')
        masterCmtList.append((cmt, masterSampList))
        co.close()

    repo.remote.add('origin', server_instance)
    push1 = repo.remote.push('origin', 'master')
    assert push1 == 'master'
    masterHist = list_history(repo._env.refenv,
                              repo._env.branchenv,
                              branch_name='master')

    # Push dev branch test
    devCmtList = []
    branch = repo.create_branch('testbranch')
    for cIdx in range(nDevCommits):
        co = repo.checkout(write=True, branch=branch.name)
        devSampList = []
        with co.columns['writtenaset'] as d:
            for prevKey in list(d.keys())[1:]:
                del d[prevKey]
            for sIdx in range(nDevSamples):
                arr = np.random.randn(*array5by7.shape).astype(
                    np.float32) * 100
                d[str(sIdx)] = arr
                devSampList.append(arr)
        cmt = co.commit(f'dev commit number: {cIdx}')
        devCmtList.append((cmt, devSampList))
        co.close()

    push2 = repo.remote.push('origin', branch.name)
    assert push2 == branch.name
    branchHist = list_history(repo._env.refenv,
                              repo._env.branchenv,
                              branch_name=branch.name)

    # Clone test (master branch)
    new_tmpdir = pjoin(managed_tmpdir, 'new')
    mkdir(new_tmpdir)
    newRepo = Repository(path=new_tmpdir, exists=False)
    newRepo.clone('Test User',
                  '*****@*****.**',
                  server_instance,
                  remove_old=True)
    assert newRepo.list_branches() == ['master', 'origin/master']
    for cmt, sampList in masterCmtList:
        with pytest.warns(UserWarning):
            nco = newRepo.checkout(commit=cmt)
        assert len(nco.columns) == 1
        assert 'writtenaset' in nco.columns
        assert len(nco.columns['writtenaset']) == nMasterSamples

        assert nco.columns['writtenaset'].contains_remote_references is True
        remoteKeys = nco.columns['writtenaset'].remote_reference_keys
        assert tuple([str(idx) for idx in range(len(sampList))]) == remoteKeys
        for idx, _ in enumerate(sampList):
            sIdx = str(idx)
            assert sIdx in nco.columns['writtenaset']
            with pytest.raises(FileNotFoundError):
                shouldNotExist = nco.columns['writtenaset'][sIdx]
        nco.close()
    cloneMasterHist = list_history(newRepo._env.refenv,
                                   newRepo._env.branchenv,
                                   branch_name='master')
    assert cloneMasterHist == masterHist

    # Fetch test
    fetch = newRepo.remote.fetch('origin', branch=branch.name)
    assert fetch == f'origin/{branch.name}'
    assert newRepo.list_branches() == [
        'master', 'origin/master', f'origin/{branch.name}'
    ]
    for cmt, sampList in devCmtList:

        with pytest.warns(UserWarning):
            nco = newRepo.checkout(commit=cmt)
        assert len(nco.columns) == 1
        assert 'writtenaset' in nco.columns
        assert len(nco.columns['writtenaset']) == nDevSamples

        assert nco.columns['writtenaset'].contains_remote_references is True
        remoteKeys = nco.columns['writtenaset'].remote_reference_keys
        assert tuple([str(idx) for idx in range(len(sampList))]) == remoteKeys
        for idx, _ in enumerate(sampList):
            sIdx = str(idx)
            assert sIdx in nco.columns['writtenaset']
            with pytest.raises(FileNotFoundError):
                shouldNotExist = nco.columns['writtenaset'][sIdx]
        nco.close()

    cloneBranchHist = list_history(newRepo._env.refenv,
                                   newRepo._env.branchenv,
                                   branch_name=f'origin/{branch.name}')
    assert cloneBranchHist == branchHist
    newRepo._env._close_environments()
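In the partial clone above, samples exist only as remote references and raise FileNotFoundError until their data is pulled. Before the environments are closed, they could be materialized with the fetch_data API exercised in a later example; the call below is a hypothetical sketch, not part of the test:

# Hypothetical follow-up: pull the actual array data for the 'writtenaset'
# column on the cloned master branch so the samples become readable locally.
newRepo.remote.fetch_data(remote='origin', branch='master',
                          column_names=('writtenaset',))
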
Exemple #40
def bounds_model(sbml_file, directory, doc_fba=None):
    """"
    Submodel for dynamically calculating the flux bounds.

    The dynamically changing flux bounds are the input to the
    FBA model.

    The units of the exchange fluxes must fit to the transported species.
    """
    doc = builder.template_doc_bounds("ecoli")
    model = doc.getModel()

    bounds_notes = notes.format("""
    <h2>BOUNDS submodel</h2>
    <p>Submodel for dynamically calculating the flux bounds.
    The dynamically changing flux bounds are the input to the
    FBA model.</p>
    """)
    utils.set_model_info(model,
                         notes=bounds_notes,
                         creators=creators,
                         units=units,
                         main_units=main_units)

    # dt
    compartment_id = "bioreactor"
    builder.create_dfba_dt(model, time_unit=UNIT_TIME, create_port=True)

    # compartment
    builder.create_dfba_compartment(model,
                                    compartment_id=compartment_id,
                                    unit_volume=UNIT_VOLUME,
                                    create_port=True)

    # dynamic species
    model_fba = doc_fba.getModel()
    builder.create_dfba_species(model,
                                model_fba,
                                compartment_id=compartment_id,
                                unit_amount=UNIT_AMOUNT,
                                create_port=True,
                                exclude_sids=['X'])
    # FIXME: define biomass separately, also port needed for biomass
    mc.create_objects(model, [
        mc.Parameter(sid='cf_X',
                     value=1.0,
                     unit="g_per_mmol",
                     name="biomass conversion factor",
                     constant=True),
        mc.Species(sid='X',
                   initialAmount=0.001,
                   compartment=compartment_id,
                   name='biomass',
                   substanceUnit='g',
                   hasOnlySubstanceUnits=True,
                   conversionFactor='cf_X')
    ])

    # exchange & dynamic bounds
    if not biomass_weighting:
        builder.create_exchange_bounds(model,
                                       model_fba=model_fba,
                                       unit_flux=UNIT_FLUX,
                                       create_ports=True)
        builder.create_dynamic_bounds(model, model_fba, unit_flux=UNIT_FLUX)
    else:
        builder.create_exchange_bounds(model,
                                       model_fba=model_fba,
                                       unit_flux=UNIT_FLUX_PER_G,
                                       create_ports=True)
        builder.create_dynamic_bounds(model,
                                      model_fba,
                                      unit_flux=UNIT_FLUX_PER_G)

    sbmlio.write_sbml(doc, filepath=pjoin(directory, sbml_file), validate=True)
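A minimal call sketch, assuming the doc_fba document returned by the FBA builder shown earlier is available; the output file and directory names are illustrative, not from the source:

# Hypothetical usage: write the bounds submodel next to the FBA model.
out_dir = "./ecoli_dfba"
bounds_model("ecoli_bounds.xml", out_dir, doc_fba=doc_fba)
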
Exemple #41
        out['fnorm'] = fnorm

        fx = [np.rollaxis(d['fx'], -1) for d in samples]
        fx = torch.stack([torch.from_numpy(f) for f in fx]).float()
        out['fx'] = fx

        fy = [np.rollaxis(d['fy'], -1) for d in samples]
        fy = torch.stack([torch.from_numpy(f) for f in fy]).float()
        out['fy'] = fy

        return out


if __name__ == "__main__":

    dset = StackLoader(root_path=pjoin(
        '/home/ubelix/artorg/lejeune/data/medical-labeling/Dataset30'),
                       normalization='rescale',
                       depth=2,
                       resize_shape=512)

    device = torch.device('cuda')
    model = Siamese(embedded_dims=15,
                    cluster_number=15,
                    alpha=1,
                    backbone='unet').to(device)

    dl = DataLoader(dset, collate_fn=dset.collate_fn)
    labels_pos = dict()
    n_labels = dict()
    for s in dl:
        for i, f in enumerate(s['frame_idx']):
Exemple #42
def test_server_push_two_branch_then_clone_fetch_data_options(
        server_instance, repo, managed_tmpdir, array5by7, nMasterCommits,
        nMasterSamples, nDevCommits, nDevSamples, fetchBranch, fetchCommit,
        fetchAsetns, fetchNbytes, fetchAll_history):
    from hangar import Repository
    from hangar.records.summarize import list_history
    from operator import eq

    # Push master branch test
    masterCmts = {}
    co = repo.checkout(write=True)
    co.add_ndarray_column(name='writtenaset', shape=(5, 7), dtype=np.float32)
    co.add_ndarray_column(name='_two', shape=(20,), dtype=np.float32)
    co.add_str_column('str_col')
    co.add_bytes_column('bytes_col')
    for cIdx in range(nMasterCommits):
        if cIdx != 0:
            co = repo.checkout(write=True)
        masterSampList1 = []
        masterSampList2 = []
        masterSampList3 = []
        masterSampList4 = []
        with co.columns['writtenaset'] as d,\
                co.columns['_two'] as dd,\
                co.columns['str_col'] as scol, \
                co.columns['bytes_col'] as bcol:
            for prevKey in list(d.keys())[1:]:
                del d[prevKey]
                del dd[prevKey]
                del scol[prevKey]
                del bcol[prevKey]

            for sIdx in range(nMasterSamples):
                arr1 = np.random.randn(*array5by7.shape).astype(
                    np.float32) * 100
                d[str(sIdx)] = arr1
                masterSampList1.append(arr1)
                arr2 = np.random.randn(20).astype(np.float32)
                dd[str(sIdx)] = arr2
                masterSampList2.append(arr2)
                sval = f'strval master {cIdx} {sIdx}'
                scol[str(sIdx)] = sval
                masterSampList3.append(sval)
                bval = f'bytesval master {cIdx} {sIdx}'.encode()
                bcol[str(sIdx)] = bval
                masterSampList4.append(bval)

        cmt = co.commit(f'master commit number: {cIdx}')
        masterCmts[cmt] = (masterSampList1, masterSampList2, masterSampList3,
                           masterSampList4)
        co.close()

    repo.remote.add('origin', server_instance)
    push1 = repo.remote.push('origin', 'master')
    assert push1 == 'master'
    masterHist = list_history(repo._env.refenv,
                              repo._env.branchenv,
                              branch_name='master')

    # Push dev branch test
    devCmts = masterCmts.copy()
    branch = repo.create_branch('testbranch')
    for cIdx in range(nDevCommits):
        co = repo.checkout(write=True, branch=branch.name)
        devSampList1 = []
        devSampList2 = []
        devSampList3 = []
        devSampList4 = []
        with co.columns['writtenaset'] as d,\
                co.columns['_two'] as dd,\
                co.columns['str_col'] as scol, \
                co.columns['bytes_col'] as bcol:
            for prevKey in list(d.keys())[1:]:
                del d[prevKey]
                del dd[prevKey]
                del scol[prevKey]
                del bcol[prevKey]

            for sIdx in range(nDevSamples):
                arr1 = np.random.randn(*array5by7.shape).astype(
                    np.float32) * 100
                d[str(sIdx)] = arr1
                devSampList1.append(arr1)
                arr2 = np.random.randn(20).astype(np.float32)
                dd[str(sIdx)] = arr2
                devSampList2.append(arr2)
                sval = f'strval dev {cIdx} {sIdx}'
                scol[str(sIdx)] = sval
                devSampList3.append(sval)
                bval = f'bytesval dev {cIdx} {sIdx}'.encode()
                bcol[str(sIdx)] = bval
                devSampList4.append(bval)

        cmt = co.commit(f'dev commit number: {cIdx}')
        devCmts[cmt] = (devSampList1, devSampList2, devSampList3, devSampList4)
        co.close()

    push2 = repo.remote.push('origin', branch.name)
    assert push2 == branch.name
    branchHist = list_history(repo._env.refenv,
                              repo._env.branchenv,
                              branch_name=branch.name)

    # -------------------------- end setup ------------------------------------

    # Clone test (master branch)
    new_tmpdir = pjoin(managed_tmpdir, 'new')
    mkdir(new_tmpdir)
    newRepo = Repository(path=new_tmpdir, exists=False)
    newRepo.clone('Test User',
                  '*****@*****.**',
                  server_instance,
                  remove_old=True)
    newRepo.remote.fetch('origin', branch=branch.name)
    newRepo.create_branch('testbranch', base_commit=branchHist['head'])
    assert newRepo.list_branches() == [
        'master', 'origin/master', f'origin/{branch.name}', branch.name
    ]

    # ------------------ format arguments depending on options ----------------

    kwargs = {
        'column_names': fetchAsetns,
        'max_num_bytes': fetchNbytes,
        'retrieve_all_history': fetchAll_history,
    }
    if fetchBranch is not None:
        func = branchHist if fetchBranch == 'testbranch' else masterHist
        kwargs['branch'] = fetchBranch
        kwargs['commit'] = None
    else:
        func = branchHist if fetchCommit == 'br' else masterHist
        kwargs['branch'] = None
        kwargs['commit'] = func['head']

    if fetchAll_history is True:
        commits_to_check = func['order']
    else:
        commits_to_check = [func['head']]

    # ----------------------- retrieve data with desired options --------------

    # This case should fail
    if (fetchAll_history is True) and isinstance(fetchNbytes, int):
        try:
            with pytest.raises(ValueError):
                fetch_commits = newRepo.remote.fetch_data(remote='origin',
                                                          **kwargs)
        finally:
            newRepo._env._close_environments()
        return True
    # get data
    fetch_commits = newRepo.remote.fetch_data(remote='origin', **kwargs)
    assert commits_to_check == fetch_commits

    # ------------- check that you got everything you expected ----------------

    for fCmt in fetch_commits:
        co = newRepo.checkout(commit=fCmt)
        assert co.commit_hash == fCmt

        # when we are checking one aset only
        if isinstance(fetchAsetns, tuple):
            d = co.columns[fetchAsetns[0]]
            # ensure we didn't fetch the other data simultaneously

            ds1SampList, ds2SampList, ds3SampList, ds4SampList = devCmts[fCmt]
            if fetchAsetns[0] == 'writtenaset':
                compare = ds1SampList
                cmp_func = np.allclose
            elif fetchAsetns[0] == '_two':
                compare = ds2SampList
                cmp_func = np.allclose
            elif fetchAsetns[0] == 'str_col':
                compare = ds3SampList
                cmp_func = eq
            else:
                compare = ds4SampList
                cmp_func = eq

            totalSeen = 0
            for idx, samp in enumerate(compare):
                if fetchNbytes is None:
                    assert cmp_func(samp, d[str(idx)])
                else:
                    try:
                        arr = d[str(idx)]
                        assert cmp_func(samp, arr)
                        try:
                            totalSeen += arr.nbytes
                        except AttributeError:
                            totalSeen += len(arr)
                    except FileNotFoundError:
                        pass
                    assert totalSeen <= fetchNbytes

        # compare both asets at the same time
        else:
            d = co.columns['writtenaset']
            dd = co.columns['_two']
            str_col = co.columns['str_col']
            bytes_col = co.columns['bytes_col']
            ds1List, ds2List, ds3List, ds4List = devCmts[fCmt]
            totalSeen = 0
            for idx, ds1ds2ds3ds4 in enumerate(
                    zip(ds1List, ds2List, ds3List, ds4List)):
                ds1, ds2, ds3, ds4 = ds1ds2ds3ds4
                if fetchNbytes is None:
                    assert np.allclose(ds1, d[str(idx)])
                    assert np.allclose(ds2, dd[str(idx)])
                    assert ds3 == str_col[str(idx)]
                    assert ds4 == bytes_col[str(idx)]
                else:
                    try:
                        arr1 = d[str(idx)]
                        assert np.allclose(ds1, arr1)
                        totalSeen += arr1.nbytes
                    except FileNotFoundError:
                        pass
                    try:
                        arr2 = dd[str(idx)]
                        assert np.allclose(ds2, arr2)
                        totalSeen += arr2.nbytes
                    except FileNotFoundError:
                        pass
                    try:
                        sval = str_col[str(idx)]
                        assert ds3 == sval
                        totalSeen += len(sval.encode())
                    except FileNotFoundError:
                        pass
                    try:
                        bval = bytes_col[str(idx)]
                        assert ds4 == bval
                        totalSeen += len(bval)
                    except FileNotFoundError:
                        pass
                    assert totalSeen <= fetchNbytes
        co.close()
    newRepo._env._close_environments()
Exemple #43
def create_initial_positions3(xdivide=10, ydivide=10, lon0=55., lon1=56., lat0=-21.5, lat1=-20.5,
                              start=1, zdivide=10, k0=56., k1=60., crop=False,
                              outfile='initial_positions.txt', domain_dir='/Users/agn/Data/NEMO0083'):

    lons = np.linspace(lon0, lon1, xdivide+1)
    lats = np.linspace(lat0, lat1, ydivide+1)
    kdepths = np.linspace(k0, k1, zdivide+1)

    path = pjoin(domain_dir,'mask.nc')
    print('path for mask file', path)
    with Dataset(path) as f:
        Ndlat = f.variables['nav_lat']
        meridional = Ndlat[:,0]
        jeq = meridional.searchsorted(0.)
        Ndlon = f.variables['nav_lon']
        zonal = Ndlon[jeq,:]

        ibreak = np.argmax(zonal) + 1
        if lon0 > zonal[0] and lon1 < zonal[ibreak-1]:
            zonal_part = zonal[:ibreak]
            iadd = 0
        elif lon0 > zonal[ibreak] and lon1 < zonal[-1]:
            zonal_part = zonal[ibreak:]
            iadd = ibreak
        else:
            sys.exit('lon0 and lon1 bracket dateline; not implemented yet')

        i0 = zonal_part.searchsorted(lon0) - 1 + iadd
        i1 = zonal_part.searchsorted(lon1) + 1 + iadd
        imid = (i0 + i1)//2
        meridional = Ndlat[:,imid]
        j0 = meridional.searchsorted(lat0) - 1
        j1 = meridional.searchsorted(lat1) + 1
        jmid = (j0 + j1)//2
        zonal = Ndlon[jmid,:]

        dlon = np.diff(zonal)
        ibreak = np.argmin(dlon)

        ri = np.interp(lons, zonal[i0:i1], np.arange(i0,i1,dtype=zonal.dtype))
        rj = np.interp(lats, meridional[j0:j1], np.arange(j0,j1,dtype=meridional.dtype))
        print('specified longitudes are','\n',lons)
        print('i values are','\n',ri + 1. -.5)
        print('specified latitudes are','\n',lats)
        print('j values are','\n',rj + 1. -.5)
        print('k values are','\n',kdepths)

        i00, i11 = i0 - 1, i1 + 1
        j00, j11 = j0 - 1, j1 + 1
        tmask = ~(f.variables['tmask'][0, :, j00:j11, i00:i11].astype(bool))

    uvmask = tmask[:,1:-1,1:-1] + tmask[:,:-2,1:-1] + tmask[:,2:,1:-1] \
                + tmask[:,1:-1,:-2] + tmask[:,1:-1,2:]

    if crop:
        inner_circle, outer_circle, fuel_circle, box = get_circles()
        polygon = Polygon(list(zip(*box)))
    npoints = 0
    with open(outfile, 'w') as f:
        for kdepth in kdepths:
            for y, lat in zip(rj, lats):
                for x, lon in zip(ri, lons):
                    if (not crop) or (crop and polygon.contains(Point(lon, lat))):
                        i, j, k = int(x), int(y), int(kdepth)
                        if not uvmask[k-1,j-j0, i-i0]:
                            # ri and rj are  c-indices relative to T-cells
                            # subtract .5 to produce u v indices as required by ariane
                            # ... and add 1 to convert from C to fortran numbering
                            f.write('%10g%10g%10g%10g%5g\n' %(x + 1. -.5, y + 1. -.5 , kdepth, start, 1))
                            npoints += 1
    print('# of sea points started is %g out of %g' % (npoints, len(rj)*len(ri)*len(kdepths)))
Exemple #44
def plotTimeseries(inputPath, outputPath, locID=None):
    """
    Load the data and pass it to the :meth:`TimeSeriesFigure.plot` method.

    :param str inputPath: Path to the raw timeseries data.
    :param str outputPath: Path to the location that images should be
                           stored in.
    :param str locID: Unique identifier for a chosen location. If not
                      given, all files in the input path will be processed.

    Example: plotTimeseries('/tcrm/output/timeseries','/tcrm/output/plots')

    """
    if locID:
        # Only plot the data corresponding to the requested location ID:
        logging.debug("Plotting data for station {0}".format(locID))
        inputFile = pjoin(inputPath, 'ts.%s.csv' % (locID))
        outputFile = pjoin(outputPath, 'ts.%s.png' % (locID))
        inputData = loadTimeseriesData(inputFile)

        stnInfo = {
            'ID': locID,
            'lon': inputData['Longitude'][0],
            'lat': inputData['Latitude'][0]
        }
        title = 'Station ID: %s (%6.2f, %6.2f)' % (
            locID, inputData['Longitude'][0], inputData['Latitude'][0])
        fig = TimeSeriesFigure()
        fig.add(inputData['Time'], inputData['Pressure'], [900, 1020],
                'Pressure (hPa)', 'Pressure')
        fig.add(inputData['Time'], inputData['Speed'], [0, 100],
                'Wind speed (m/s)', 'Wind speed')
        fig.add(inputData['Time'], inputData['Bearing'], [0, 360], 'Direction',
                'Wind direction')

        fig.plot()
        fig.addTitle(title)
        saveFigure(fig, outputFile)

    else:
        files = os.listdir(inputPath)
        inputFileList = [f for f in files if f.startswith('ts.')]
        for f in inputFileList:
            # Here we assume the timeseries files are named ts.<location ID>.csv
            locID = f[len('ts.'):-len('.csv')]
            outputFile = pjoin(outputPath, '%s.png' % f[:-len('.csv')])
            inputData = loadTimeseriesData(pjoin(inputPath, f))

            stnInfo = {
                'ID': locID,
                'lon': inputData['Longitude'][0],
                'lat': inputData['Latitude'][0]
            }
            title = 'Station ID: %s (%6.2f, %6.2f)' % (
                locID, inputData['Longitude'][0], inputData['Latitude'][0])

            fig = TimeSeriesFigure()
            fig.add(inputData['Time'], inputData['Pressure'], [900, 1020],
                    'Pressure (hPa)', 'Sea level pressure')
            fig.add(inputData['Time'], inputData['Speed'], [0, 100],
                    'Wind speed (m/s)', 'Wind speed')
            fig.add(inputData['Time'], inputData['Bearing'], [0, 360],
                    'Direction', 'Wind direction')
            fig.plot()
            fig.addTitle(title)
            saveFigure(fig, outputFile)
def load_word_embeddings():
    return np.load(pjoin(DATA_DIR,
                         "glove.trimmed.100.npz"))["glove"].astype(np.float32)
Exemple #46
import sys
import os
from os.path import join as pjoin

# Import the packet proto definitions. If they are not found, attempt
# to generate them automatically. This assumes that the script is
# executed from the gem5 root.
try:
    import inst_dep_record_pb2
except ImportError:
    print("Did not find proto definition, attempting to generate")
    from subprocess import call
    gem5_root = os.environ['gem5_root']
    error = call([
        'protoc', '--python_out={}'.format(os.getcwd()),
        '--proto_path={}'.format(pjoin(gem5_root, 'src/proto')),
        pjoin(gem5_root, 'src/proto/inst_dep_record.proto')
    ])
    if not error:
        import inst_dep_record_pb2
        print("Generated proto definitions for instruction dependency record")
    else:
        print("Failed to import proto definitions")
        exit(-1)


def main():
    if len(sys.argv) != 2:
        print("Usage: ", sys.argv[0], " <protobuf input>")
        exit(-1)
def load_from_file(file):
    with open(pjoin(DATA_DIR, file), "r") as f:
        return np.array([list(map(int, line.strip().split())) for line in f])
def load_vocabulary():
    with open(pjoin(DATA_DIR, "vocab.dat"), "r") as f:
        return np.array([line.strip() for line in f])
Exemple #49
def pxd(name):
    return os.path.abspath(pjoin('pandas', name + '.pxd'))
def create_dataset(file):
    dataset = tf.data.TextLineDataset(pjoin(DATA_DIR, file))
    string_split = dataset.map(lambda string: tf.string_split([string]).values)
    integer_dataset = string_split.map(
        lambda x: tf.string_to_number(x, out_type=tf.int32))
    return integer_dataset
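A short TF1-style consumption sketch, assuming the same DATA_DIR and an illustrative file "ids.txt" of whitespace-separated integers; not part of the source:

# Hypothetical usage: pull one parsed line from the integer dataset.
dataset = create_dataset("ids.txt")
next_ids = dataset.make_one_shot_iterator().get_next()
with tf.Session() as sess:
    print(sess.run(next_ids))  # one line of the file as an int32 vector
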
Exemple #51
License
=======
``monoensemble`` is licensed under the terms of the BSD 3 Clause License. See the
file "LICENSE" for information on the history of this software, terms &
conditions for usage, and a DISCLAIMER OF ALL WARRANTIES.

All trademarks referenced herein are property of their respective holders.

Copyright (c) 2017, Christopher Bartley
"""

NAME = 'monoensemble'
MAINTAINER = "Christopher Bartley"
MAINTAINER_EMAIL = "*****@*****.**"
DESCRIPTION = description
LONG_DESCRIPTION = long_description
URL = "http://github.com/chriswbartley/monoensemble"
DOWNLOAD_URL = ""
LICENSE = "BSD 3 Clause"
AUTHOR = "Christopher Bartley"
AUTHOR_EMAIL = "*****@*****.**"
PLATFORMS = "OS Independent"
MAJOR = _version_major
MINOR = _version_minor
MICRO = _version_micro
VERSION = __version__
PACKAGE_DATA = {'monoensemble': [pjoin('data', '*')]}
REQUIRES = []
INSTALL_REQUIRES = ["numpy","scipy","scikit-learn", "cython"]
Exemple #52
from os.path import join as pjoin, abspath, dirname, pardir

PROJ_ROOT = abspath(pjoin(dirname(__file__), pardir))
DATA_ROOT = pjoin(PROJ_ROOT, 'data')
THUMBNAIL_PREFIX = 'test/cache/'
THUMBNAIL_DEBUG = True
THUMBNAIL_LOG_HANDLER = {
    'class': 'sorl.thumbnail.log.ThumbnailLogHandler',
    'level': 'ERROR',
}
THUMBNAIL_KVSTORE = 'thumbnail_tests.kvstore.TestKVStore'
THUMBNAIL_STORAGE = 'thumbnail_tests.storage.TestStorage'
DEFAULT_FILE_STORAGE = 'thumbnail_tests.storage.TestStorage'
ADMINS = (('Sorl', '*****@*****.**'), )
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': 'thumbnail_tests',
    }
}
MEDIA_ROOT = pjoin(PROJ_ROOT, 'media')
MEDIA_URL = '/media/'
ROOT_URLCONF = 'thumbnail_tests.urls'
INSTALLED_APPS = (
    'thumbnail',
    'thumbnail_tests',
)
TEMPLATE_CONTEXT_PROCESSORS = ("django.core.context_processors.request", )
ALLOWED_HOSTS = ['localhost']
SECRET_KEY = 'test978udwi33487h5o385fb3oiun23e23oiu24fn'
Exemple #53
def read_events_from_cat(
    event_params: dict,
    cat: obspy.Catalog,
    inv: obspy.Inventory,
    local_folder: str,
    host_name: str = None,
    user_name: str = None,
    remote_folder: str = None,
    save_file_name: str = None,
) -> list:
    """
    Read mars-events from catalog.xml and add waveform data to the events.
    If the waveforms are on a server, host_name, user_name and remote_folder must be specified.
    :param event_params: dict of event names (keys) used for the inversion
    :param cat: obspy.Catalog including the updated events
    :param inv: obspy.Inventory with the station metadata
    :param local_folder: path to the waveform data on the local machine
    :param host_name: host IP address
    :param user_name: username on the server
    :param remote_folder: path to the remote folder that contains the waveform data
    :param save_file_name: name of the .xml file to save the events; only saved when not None
        (do not specify the full path, the file is saved in /events/)
    :return: list of events with waveform data attached
    """
    ## Variables that are set:
    dir_exist_name = "events"
    dir_exist = False

    ## Create empty list of events to invert for:
    events = []

    ## Setup remote directory /mnt/marshost
    event_names = [key for key in event_params.keys()]
    data_exist = False
    if all([isdir(path) for path in [pjoin(dir_exist_name, x) for x in event_names]]):
        data_exist = True
        print("The miniseed data is already saved in /events/.. and used for the inversion")
    else:
        if host_name is not None:
            # Check if your mount folder already exists from previous runs for example:
            if not lsdir(local_folder):
                print(f"{local_folder} is still empty, mounting now...")
                mnt_remote_folder(host_name, user_name, remote_folder, local_folder)
            # TODO: check if local folder exists, otherwise raise error.
    local_folder = pjoin(local_folder, "sc3data")
    for i, v in event_params.items():
        try:
            event = cat.select(name=i).events[0]

            # Reading the event waveforms:
            if data_exist:
                filepath = pjoin(dir_exist_name, i, "waveforms")
                event = read_waveforms_from_saved_dir(file_path=filepath, event=event)
            elif event.waveforms_VBB is not None:
                pass
            else:
                #
                event.read_waveforms(inv=inv, sc3dir=local_folder)

            events.append(event)

        except IndexError as e:
            raise IndexError("Event {} does not exist in the catalog".format(i))

    ## Unmount:
    if host_name is not None and data_exist is False:
        unmnt_remote_folder(local_folder)

    ## save the events as a new catalog
    if save_file_name is not None:
        # event_cat = obspy.Catalog(self.events)
        # event_cat.write(
        #     filename=pjoin(dir_exist_name,save_file_name.strip(".xml")+ ".xml"), format="QUAKEML"
        # )
        pass
        # TODO: IMPLEMENT SAVE FUNCTION!

    return events
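A hedged usage sketch, not from the source: the catalog and inventory file names and the event dictionary are illustrative, read here with the standard obspy.read_events / obspy.read_inventory helpers:

# Hypothetical call: attach waveforms for one event from a mounted folder.
cat = obspy.read_events("catalog.xml")
inv = obspy.read_inventory("inventory.xml")
event_params = {"S0235b": {}}  # illustrative event name and (empty) parameters
events = read_events_from_cat(event_params, cat, inv,
                              local_folder="/mnt/marshost")
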
Exemple #54
def srcpath(name=None, suffix='.pyx', subdir='src'):
    return pjoin('pandas', subdir, name + suffix)
Exemple #55
    def load_from(self, weights, n_block):
        ROOT = f"Transformer/encoderblock_{n_block}"
        with torch.no_grad():
            query_weight = np2th(weights[pjoin(ROOT, ATTENTION_Q,
                                               "kernel")]).view(
                                                   self.hidden_size,
                                                   self.hidden_size).t()
            key_weight = np2th(weights[pjoin(ROOT, ATTENTION_K,
                                             "kernel")]).view(
                                                 self.hidden_size,
                                                 self.hidden_size).t()
            value_weight = np2th(weights[pjoin(ROOT, ATTENTION_V,
                                               "kernel")]).view(
                                                   self.hidden_size,
                                                   self.hidden_size).t()
            out_weight = np2th(weights[pjoin(ROOT, ATTENTION_OUT,
                                             "kernel")]).view(
                                                 self.hidden_size,
                                                 self.hidden_size).t()

            query_bias = np2th(weights[pjoin(ROOT, ATTENTION_Q,
                                             "bias")]).view(-1)
            key_bias = np2th(weights[pjoin(ROOT, ATTENTION_K,
                                           "bias")]).view(-1)
            value_bias = np2th(weights[pjoin(ROOT, ATTENTION_V,
                                             "bias")]).view(-1)
            out_bias = np2th(weights[pjoin(ROOT, ATTENTION_OUT,
                                           "bias")]).view(-1)

            self.attn.query.weight.copy_(query_weight)
            self.attn.key.weight.copy_(key_weight)
            self.attn.value.weight.copy_(value_weight)
            self.attn.out.weight.copy_(out_weight)
            self.attn.query.bias.copy_(query_bias)
            self.attn.key.bias.copy_(key_bias)
            self.attn.value.bias.copy_(value_bias)
            self.attn.out.bias.copy_(out_bias)

            mlp_weight_0 = np2th(weights[pjoin(ROOT, FC_0, "kernel")]).t()
            mlp_weight_1 = np2th(weights[pjoin(ROOT, FC_1, "kernel")]).t()
            mlp_bias_0 = np2th(weights[pjoin(ROOT, FC_0, "bias")]).t()
            mlp_bias_1 = np2th(weights[pjoin(ROOT, FC_1, "bias")]).t()

            self.ffn.fc1.weight.copy_(mlp_weight_0)
            self.ffn.fc2.weight.copy_(mlp_weight_1)
            self.ffn.fc1.bias.copy_(mlp_bias_0)
            self.ffn.fc2.bias.copy_(mlp_bias_1)

            self.attention_norm.weight.copy_(
                np2th(weights[pjoin(ROOT, ATTENTION_NORM, "scale")]))
            self.attention_norm.bias.copy_(
                np2th(weights[pjoin(ROOT, ATTENTION_NORM, "bias")]))
            self.ffn_norm.weight.copy_(
                np2th(weights[pjoin(ROOT, MLP_NORM, "scale")]))
            self.ffn_norm.bias.copy_(
                np2th(weights[pjoin(ROOT, MLP_NORM, "bias")]))
Exemple #56
def main():
    """Main entry point of the script when it is invoked by XBMC."""
    log(u"Version: %s" % __version__, level=LOGINFO)

    # Get parameters from XBMC and launch actions
    params = get_params(sys.argv)

    if params['action'] in ('search', 'manualsearch'):
        item = {
            'temp': False,
            'rar': False,
            'year': xbmc.getInfoLabel("VideoPlayer.Year"),
            'season': str(xbmc.getInfoLabel("VideoPlayer.Season")),
            'episode': str(xbmc.getInfoLabel("VideoPlayer.Episode")),
            'tvshow': normalize_string(xbmc.getInfoLabel("VideoPlayer.TVshowtitle")),
            # Try to get original title
            'title': normalize_string(xbmc.getInfoLabel("VideoPlayer.OriginalTitle")),
            # Full path of a playing file
            'file_original_path': unquote(xbmc.Player().getPlayingFile().decode('utf-8')),
            '3let_language': [],
            '2let_language': [],
            'manual_search': 'searchstring' in params,
        }

        if 'searchstring' in params:
            item['manual_search_string'] = params['searchstring']

        for lang in unquote(params['languages']).decode('utf-8').split(","):
            item['3let_language'].append(xbmc.convertLanguage(lang, xbmc.ISO_639_2))
            item['2let_language'].append(xbmc.convertLanguage(lang, xbmc.ISO_639_1))

        if not item['title']:
            # No original title, get just Title
            item['title'] = normalize_string(xbmc.getInfoLabel("VideoPlayer.Title"))

        if "s" in item['episode'].lower():
            # Check if season is "Special"
            item['season'] = "0"
            item['episode'] = item['episode'][-1:]

        if "http" in item['file_original_path']:
            item['temp'] = True

        elif "rar://" in item['file_original_path']:
            item['rar'] = True
            item['file_original_path'] = os.path.dirname(item['file_original_path'][6:])

        elif "stack://" in item['file_original_path']:
            stackPath = item['file_original_path'].split(" , ")
            item['file_original_path'] = stackPath[0][8:]

        Search(item)

    elif params['action'] == 'download':
        workdir = pjoin(__profile__, 'temp')
        # Make sure it ends with a path separator (Kodi 14)
        workdir = workdir + os.path.sep
        workdir = xbmc.translatePath(workdir)

        ensure_workdir(workdir)

        # We pickup our arguments sent from the Search() function
        subs = Download(params["id"], workdir)
        # We can return more than one subtitle for multi CD versions, for now
        # we are still working out how to handle that in XBMC core
        for sub in subs:
            listitem = xbmcgui.ListItem(label=sub)
            xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]), url=sub,
                                        listitem=listitem, isFolder=False)

    # Send end of directory to XBMC
    xbmcplugin.endOfDirectory(int(sys.argv[1]))
Exemple #57
EXTRAS_REQUIRE = {
    'build': ['cython>=' + CYTHON_MIN_VER],
    'develop': ['cython>=' + CYTHON_MIN_VER],
    'docs': [
        'sphinx', 'nbconvert', 'jupyter_client', 'ipykernel', 'matplotlib',
        'nbformat', 'numpydoc', 'pandas-datareader'
    ]
}

###############################################################################
# Values that rarely change
###############################################################################
DISTNAME = 'statsmodels'
DESCRIPTION = 'Statistical computations and models for Python'
SETUP_DIR = split(abspath(__file__))[0]
with open(pjoin(SETUP_DIR, 'README.rst')) as readme:
    README = readme.read()
LONG_DESCRIPTION = README
MAINTAINER = 'statsmodels Developers'
MAINTAINER_EMAIL = '*****@*****.**'
URL = 'https://www.statsmodels.org/'
LICENSE = 'BSD License'
DOWNLOAD_URL = ''
PROJECT_URLS = {
    'Bug Tracker': 'https://github.com/statsmodels/statsmodels/issues',
    'Documentation': 'https://www.statsmodels.org/stable/index.html',
    'Source Code': 'https://github.com/statsmodels/statsmodels'
}

CLASSIFIERS = [
    'Development Status :: 4 - Beta', 'Environment :: Console',
Exemple #58
def test_tempdir():
    with tempdir() as tmpdir:
        fname = pjoin(tmpdir, 'example_file.txt')
        with open(fname, 'wt') as fobj:
            fobj.write('a string\\n')
    assert_(not exists(tmpdir))
import os
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.patheffects as pe
from matplotlib import cm
import torch
import argparse
import sys
from os.path import join as pjoin
BASEPATH = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, BASEPATH)
sys.path.insert(0, pjoin(BASEPATH, '..'))
sys.path.insert(0, pjoin(BASEPATH, '..', '..'))

from py_utils import to_float


"""
Motion info: 
    joint parents, foot_idx
"""
J = 21
parents = np.array([-1, 0, 1, 2, 3, 0, 5, 6, 7, 0, 9, 10, 11, 10, 13, 14, 15, 10, 17, 18, 19])
joint_foot_indices = [3, 4, 7, 8]
joint_sizes = [3 for i in range(J)]
head_index = 12
joint_sizes[head_index] = 7

"""
    def train(self):
        tic = time.time()
        params = tf.trainable_variables()
        num_params = sum(
            map(lambda t: np.prod(tf.shape(t.value()).eval()), params))
        toc = time.time()
        logging.info("Number of params: %d (retreival took %f secs)" %
                     (num_params, toc - tic))

        # train_context -> (num, 2, max_length)
        train_context = np.array(self.dataset['train_context'])
        train_question = np.array(self.dataset['train_question'])
        # train_answer -> (num, 2)
        train_answer = np.array(self.answers['train_answer'])

        if self.debug_num:
            assert isinstance(self.debug_num,
                              int), 'the debug number should be a integer'
            assert self.debug_num < len(train_answer), 'check debug number!'
            train_answer = train_answer[0:self.debug_num]
            train_context = train_context[0:self.debug_num]
            train_question = train_question[0:self.debug_num]
            print_every = 5

        num_example = len(train_answer)
        logging.info('num example is {}'.format(num_example))
        shuffle_list = np.arange(num_example)

        learning_rate = tf.train.exponential_decay(self.starter_learning_rate,
                                                   self.model.global_step,
                                                   1000,
                                                   0.9,
                                                   staircase=True)
        tf.summary.scalar('learning_rate', learning_rate)

        with tf.name_scope('optimizer'):
            optimizer = self.optimizer(learning_rate)
            grad_var = optimizer.compute_gradients(self.model.final_loss)
            grad = [i[0] for i in grad_var]
            var = [i[1] for i in grad_var]
            grad_norm = tf.global_norm(grad)
            tf.summary.scalar('grad_norm', grad_norm)
            grad, use_norm = tf.clip_by_global_norm(grad, self.max_grad_norm)
            train_op = optimizer.apply_gradients(
                zip(grad, var), global_step=self.model.global_step)

        saver = tf.train.Saver()
        merged = tf.summary.merge_all()

        losses = []
        norms = []
        train_evals = []
        val_evals = []
        iters = self.start_steps
        save_path = pjoin(self.train_dir, 'weights')
        batch_size = self.batch_size
        batch_num = int(num_example * 1.0 / batch_size)
        total_iterations = self.epochs * batch_num + self.start_steps

        config = tf.ConfigProto(allow_soft_placement=True)
        config.gpu_options.allow_growth = True
        with tf.Session(config=config) as sess:
            train_writer = tf.summary.FileWriter(
                self.summary_dir + str(self.start_lr), sess.graph)
            tic = time.time()

            for ep in range(self.epochs):
                np.random.shuffle(shuffle_list)
                train_context = train_context[shuffle_list]
                train_question = train_question[shuffle_list]
                train_answer = train_answer[shuffle_list]

                logging.info('training epoch ---- {}/{} -----'.format(
                    ep + 1, self.epochs))
                ep_loss = 0.

                for it in xrange(batch_num):
                    sys.stdout.write('> %d / %d \r' %
                                     (iters % print_every, print_every))
                    sys.stdout.flush()

                    context = train_context[it * batch_size:(it + 1) *
                                            batch_size]
                    question = train_question[it * batch_size:(it + 1) *
                                              batch_size]
                    answer = train_answer[it * batch_size:(it + 1) *
                                          batch_size]

                    input_feed = self.load_input_feed(context, question,
                                                      answer)
                    batch_final_loss = self.model.final_loss
                    # keep the fetched value under a separate name so the
                    # grad_norm tensor is not shadowed on later iterations
                    summary, _, loss, grad_norm_val = sess.run(
                        [merged, train_op, batch_final_loss, grad_norm],
                        input_feed)

                    train_writer.add_summary(summary, iters)
                    ep_loss += loss
                    losses.append(loss)
                    norms.append(grad_norm_val)
                    iters += 1

                    if iters % print_every == 0:
                        toc = time.time()
                        logging.info(
                            'iters: {}/{} loss: {} norm: {}. time: {} secs'.
                            format(iters, total_iterations, loss, grad_norm_val,
                                   toc - tic))
                        tf1, tem, f1, em = self.evaluate_answer(
                            sess,
                            self.dataset,
                            self.raw_answers,
                            self.rev_vocab,
                            training=True,
                            log=True,
                            sample=self.sample)
                        train_evals.append((tf1, tem))
                        val_evals.append((f1, em))
                        tic = time.time()

                    if iters % self.save_every == 0:
                        saver.save(sess, save_path, global_step=iters)
                        self.evaluate_answer(sess,
                                             self.dataset,
                                             self.raw_answers,
                                             self.rev_vocab,
                                             training=True,
                                             log=True,
                                             sample=self.sample)

                if self.save_every_epoch:
                    saver.save(sess, save_path, global_step=iters)
                    self.evaluate_answer(sess,
                                         self.dataset,
                                         self.raw_answers,
                                         self.rev_vocab,
                                         training=True,
                                         log=True,
                                         sample=4000)
                logging.info('average loss of epoch {}/{} is {}'.format(
                    ep + 1, self.epochs, ep_loss / batch_num))

                data_dict = {
                    'losses': losses,
                    'norms': norms,
                    'train_eval': train_evals,
                    'val_eval': val_evals
                }
                c_time = time.strftime('%Y%m%d_%H%M', time.localtime())
                data_save_path = pjoin('cache',
                                       str(iters) + 'iters' + c_time + '.npz')
                np.savez(data_save_path, data_dict)