Code example #1
def validate_once(true_cf = [pl.ones(3)/3.0, pl.ones(3)/3.0], true_std = 0.01*pl.ones(3), std_bias = [1., 1., 1.], save=False, dir='', i=0):
    """
    Generate a set of simulated estimates for the provided true cause fractions; fit the bad model and
    the latent simplex model to this simulated data and calculate quality metrics.
    """ 
    
    # generate simulation data
    X = data.sim_data_for_validation(1000, true_cf, true_std, std_bias)

    # fit bad model, calculate fit metrics 
    bad_model = models.bad_model(X)
    bad_model_metrics = calc_quality_metrics(true_cf, true_std, std_bias, bad_model)
    retrieve_estimates(bad_model, True, 'bad_model', dir, i)
    
    # fit latent simplex model, calculate fit metrics 
    m, latent_simplex = models.fit_latent_simplex(X)
    latent_simplex_metrics = calc_quality_metrics(true_cf, true_std, std_bias, latent_simplex)
    retrieve_estimates(latent_simplex, True, 'latent_simplex', dir, i)
    
    # either write results to disk or return them 
    if save: 
        pl.rec2csv(bad_model_metrics, '%s/metrics_bad_model_%i.csv' % (dir, i)) 
        pl.rec2csv(latent_simplex_metrics, '%s/metrics_latent_simplex_%i.csv' % (dir, i))
    else: 
        return bad_model_metrics, latent_simplex_metrics
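A side note on the signature above: the list and array defaults (true_cf, true_std, std_bias) are created once at import time and shared across calls. Nothing here mutates them, but if that were a concern, the usual None-default idiom would look like this (a sketch of the same signature, not the project's code):

def validate_once(true_cf=None, true_std=None, std_bias=None, save=False, dir='', i=0):
    # build fresh defaults on every call instead of sharing module-level objects
    true_cf = [pl.ones(3) / 3.0, pl.ones(3) / 3.0] if true_cf is None else true_cf
    true_std = 0.01 * pl.ones(3) if true_std is None else true_std
    std_bias = [1., 1., 1.] if std_bias is None else std_bias
    ...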
Code example #2
File: log_matrix.py Project: issfangks/milo-lab
def log_inv(X): # inverts a matrix of size up to 3x3, given its logscale values
    if (X.shape[0] != X.shape[1]):
        raise Exception("X is not a square matrix and cannot be inverted")
    
    if (X.shape[0] == 1):
        return matrix((-X[0,0]))
    
    ldet = log_det(X)
    if isnan(ldet):  # note: '== nan' is always False; use isnan() to catch a singular X
        raise Exception("The determinant of X is 0, cannot calculate the inverse")
     
    if (X.shape[0] == 2): # X is a 2x2 matrix
        I = (-log_det(X)) * ones((2,2))
        I[0,0] += X[1,1]
        I[0,1] += X[0,1] + complex(0, pi)
        I[1,0] += X[1,0] + complex(0, pi)
        I[1,1] += X[0,0]
        return I
    
    if (X.shape[0] == 3): # X is a 3x3 matrix
        I = (-log_det(X)) * ones((3,3))
        I[0,0] += log_subt_exp(X[1,1]+X[2,2], X[1,2]+X[2,1])
        I[0,1] += log_subt_exp(X[0,2]+X[2,1], X[0,1]+X[2,2])
        I[0,2] += log_subt_exp(X[0,1]+X[1,2], X[0,2]+X[1,1])
        I[1,0] += log_subt_exp(X[2,0]+X[1,2], X[1,0]+X[2,2])
        I[1,1] += log_subt_exp(X[0,0]+X[2,2], X[0,2]+X[2,0])
        I[1,2] += log_subt_exp(X[0,2]+X[1,0], X[0,0]+X[1,2])
        I[2,0] += log_subt_exp(X[1,0]+X[2,1], X[2,0]+X[1,1])
        I[2,1] += log_subt_exp(X[2,0]+X[0,1], X[0,0]+X[2,1])
        I[2,2] += log_subt_exp(X[0,0]+X[1,1], X[0,1]+X[1,0])
        return I
    
    raise Exception("log_inv is only implemented for matrices of size < 4")
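log_det and log_subt_exp are helpers from the same module and are not shown here. For orientation, a minimal sketch of what log_subt_exp evidently computes, judging from its use above (log(exp(a) - exp(b)) without leaving log space):

import numpy as np

def log_subt_exp(a, b):
    # log(exp(a) - exp(b)) = a + log(1 - exp(b - a)); stable for a >= b (real result).
    # Sign flips are handled separately via the complex(0, pi) offsets seen in
    # the 2x2 branch above, since log(-x) = log(x) + i*pi.
    return a + np.log1p(-np.exp(b - a))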
Code example #3
def run_on_cluster(dir='../data', true_cf = [pl.ones(3)/3.0, pl.ones(3)/3.0], true_std = 0.01*pl.ones(3), std_bias=[1.,1.,1.], reps=5, tag=''):
    """
    Runs validate_once multiple times (as specified by reps) for the given true_cf and
    true_std, then combines the output and cleans up the temp files. This is accomplished
    in parallel on the cluster. This function requires that the files cluster_shell.sh
    (which allows for submission of a job for each iteration), cluster_validate.py (which
    runs validate_once for each iteration), and cluster_validate_combine.py (which runs
    combine_output) all exist. The tag argument allows for adding a string to the job
    names so that this function can be run multiple times simultaneously without
    conflicts between jobs with the same name.
    """

    T, J = pl.array(true_cf).shape  
    if os.path.exists(dir) == False: os.mkdir(dir)

    # write true_cf and true_std to file
    data.rec2csv_2d(pl.array(true_cf), '%s/truth_cf.csv' % (dir))
    data.rec2csv_2d(pl.array(true_std), '%s/truth_std.csv' % (dir))
    data.rec2csv_2d(pl.array([std_bias]), '%s/truth_bias.csv' % (dir))
    
    # submit all individual jobs to retrieve true_cf and true_std and run validate_once
    all_names = [] 
    for i in range(reps): 
        name = 'cc%s_%i' % (tag, i)
        call = 'qsub -cwd -N %s cluster_shell.sh cluster_validate.py %i "%s"' % (name, i, dir)
        subprocess.call(call, shell=True)
        all_names.append(name)
    
    # submit job to run combine_output and clean_up 
    hold_string = '-hold_jid %s ' % ','.join(all_names)
    call = 'qsub -cwd %s -N cc%s_comb cluster_shell.sh cluster_validate_combine.py %i "%s"' % (hold_string, tag, reps, dir)
    subprocess.call(call, shell=True)  
Code example #4
    def sample(self, model, evidence):
        z = evidence['z']
        T, surfaces, sigma_g, sigma_h = [evidence[var] for var in ['T', 'surfaces', 'sigma_g', 'sigma_h']]
        mu_h, phi, sigma_z_g, sigma_z_h = [model.known_params[var] for var in ['mu_h', 'phi', 'sigma_z_g', 'sigma_z_h']]
        prior_mu_g, prior_cov_g = [model.hyper_params[var] for var in ['prior_mu_g', 'prior_cov_g']]
        prior_mu_h, prior_cov_h = [model.hyper_params[var] for var in ['prior_mu_h', 'prior_cov_h']]
        n = len(z)  # length of the observation series ('g' is not defined in this scope)

        y = ma.asarray(ones((n, 2))*nan)
        if sum(T==1) > 0:
            y[T==1, 0] = z[T==1]
        if sum(T==2) > 0:
            y[T==2, 1] = z[T==2]
        y[isnan(y)] = ma.masked

        kalman = self._kalman
        kalman.initial_state_mean=[prior_mu_g[0], prior_mu_h[0]]
        kalman.initial_state_covariance=diag([prior_cov_g[0,0], prior_cov_h[0,0]])
        kalman.transition_matrices=[[1, 0], [0, phi]]
        kalman.transition_offsets =ones((n, 2))*[0, mu_h*(1-phi)]
        kalman.transition_covariance=[[sigma_g**2, 0], [0, sigma_h**2]]
        kalman.observation_matrices=[[1, 0], [1, 1]]
        kalman.observation_covariance=[[sigma_z_g**2, 0], [0, sigma_z_h**2]]
        sampled_surfaces = forward_filter_backward_sample(kalman, y)

        return sampled_surfaces
Code example #5
def validate_once(true_cf=[pl.ones(3) / 3.0,
                           pl.ones(3) / 3.0],
                  true_std=0.01 * pl.ones(3),
                  std_bias=[1., 1., 1.],
                  save=False,
                  dir='',
                  i=0):
    """
    Generate a set of simulated estimates for the provided true cause fractions; fit the bad model and
    the latent simplex model to this simulated data and calculate quality metrics.
    """

    # generate simulation data
    X = data.sim_data_for_validation(1000, true_cf, true_std, std_bias)

    # fit bad model, calculate fit metrics
    bad_model = models.bad_model(X)
    bad_model_metrics = calc_quality_metrics(true_cf, true_std, std_bias,
                                             bad_model)
    retrieve_estimates(bad_model, True, 'bad_model', dir, i)

    # fit latent simplex model, calculate fit metrics
    m, latent_simplex = models.fit_latent_simplex(X)
    latent_simplex_metrics = calc_quality_metrics(true_cf, true_std, std_bias,
                                                  latent_simplex)
    retrieve_estimates(latent_simplex, True, 'latent_simplex', dir, i)

    # either write results to disk or return them
    if save:
        pl.rec2csv(bad_model_metrics, '%s/metrics_bad_model_%i.csv' % (dir, i))
        pl.rec2csv(latent_simplex_metrics,
                   '%s/metrics_latent_simplex_%i.csv' % (dir, i))
    else:
        return bad_model_metrics, latent_simplex_metrics
Code example #6
File: plot_settings.py Project: garciaga/pynmd
def jetWoGn(reverse=False):
    """
    jetWoGn(reverse=False)
       - returns a colormap similar to cm.jet, but without green.
         If reverse=True, the map starts with red instead of blue.
    """
    m=18 # magic number, which works fine
    m0=int(pylab.floor(m*0.0))
    m1=int(pylab.floor(m*0.2))
    m2=int(pylab.floor(m*0.2))
    m3=int(pylab.floor(m/2))-m2-m1

    b_ = pylab.hstack( (0.4*pylab.arange(m1)/(m1-1.)+0.6, pylab.ones((m2+m3,)) ) )
    g_ = pylab.hstack( (pylab.zeros((m1,)),pylab.arange(m2)/(m2-1.),pylab.ones((m3,))) )
    r_ = pylab.hstack( (pylab.zeros((m1,)),pylab.zeros((m2,)),pylab.arange(m3)/(m3-1.)))

    r = pylab.hstack((r_,pylab.flipud(b_)))
    g = pylab.hstack((g_,pylab.flipud(g_)))
    b = pylab.hstack((b_,pylab.flipud(r_)))

    if reverse:
        r = pylab.flipud(r)
        g = pylab.flipud(g)
        b = pylab.flipud(b)

    ra = pylab.linspace(0.0,1.0,m)

    cdict = {'red': list(zip(ra,r,r)),
            'green': list(zip(ra,g,g)),
            'blue': list(zip(ra,b,b))}

    return LinearSegmentedColormap('new_RdBl',cdict,256)
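Once defined, the colormap drops in anywhere matplotlib accepts one; for example:

import pylab
cmap = jetWoGn()                 # blue -> red with no green band
pylab.imshow(pylab.rand(16, 16), cmap=cmap, interpolation='nearest')
pylab.colorbar()
pylab.show()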
Code example #7
    def tempo_search(db, Key, tempo):
        """
        ::

            Static tempo-invariant search
            Returns search results for query resampled over a range of tempos.
        """
        if not db.configCheck():
            print "Failed configCheck in query spec."
            print db.configQuery
            return None
        prop = 1. / tempo  # the proportion of original samples required for new tempo
        qconf = db.configQuery.copy()
        X = db.retrieve_datum(Key)
        P = db.retrieve_datum(Key, powers=True)
        X_m = pylab.mat(X.mean(0))
        X_resamp = pylab.array(
            adb.resample_vector(X - pylab.mat(pylab.ones(X.shape[0])).T * X_m,
                                prop))
        X_resamp += pylab.mat(pylab.ones(X_resamp.shape[0])).T * X_m
        P_resamp = pylab.array(adb.resample_vector(P, prop))
        seqStart = int(pylab.around(qconf['seqStart'] * prop))
        qconf['seqStart'] = seqStart
        seqLength = int(pylab.around(qconf['seqLength'] * prop))
        qconf['seqLength'] = seqLength
        tmpconf = db.configQuery
        db.configQuery = qconf
        res = db.query_data(featData=X_resamp, powerData=P_resamp)
        res_resorted = adb.sort_search_result(res.rawData)
        db.configQuery = tmpconf
        return res_resorted
Code example #8
File: stats.py Project: mGolos/OldCodeSamples
def slidingAverage(x, N):
    tmp = convolve(
        x,
        ones((N, )) / N,
        mode='valid',
    )
    # integer division keeps the pad lengths integral under Python 3
    return r_[tmp[0] * ones(N // 2), tmp, tmp[-1] * ones(N // 2 - 1 + N % 2)]
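The edge padding repeats the first and last valid averages so the output keeps the input's length; for example, with the function and its numpy imports in scope:

from numpy import arange
x = arange(10.0)
print(slidingAverage(x, 4))   # length 10: two padded values, seven valid averages, one padded value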
Code example #9
    def tempo_search(db, Key, tempo):
        """
        ::

            Static tempo-invariant search
            Returns search results for query resampled over a range of tempos.
        """
        if not db.configCheck():
            print "Failed configCheck in query spec."
            print db.configQuery
            return None
        prop = 1.0 / tempo  # the proportion of original samples required for new tempo
        qconf = db.configQuery.copy()
        X = db.retrieve_datum(Key)
        P = db.retrieve_datum(Key, powers=True)
        X_m = pylab.mat(X.mean(0))
        X_resamp = pylab.array(adb.resample_vector(X - pylab.mat(pylab.ones(X.shape[0])).T * X_m, prop))
        X_resamp += pylab.mat(pylab.ones(X_resamp.shape[0])).T * X_m
        P_resamp = pylab.array(adb.resample_vector(P, prop))
        seqStart = int(pylab.around(qconf["seqStart"] * prop))
        qconf["seqStart"] = seqStart
        seqLength = int(pylab.around(qconf["seqLength"] * prop))
        qconf["seqLength"] = seqLength
        tmpconf = db.configQuery
        db.configQuery = qconf
        res = db.query_data(featData=X_resamp, powerData=P_resamp)
        res_resorted = adb.sort_search_result(res.rawData)
        db.configQuery = tmpconf
        return res_resorted
Code example #10
def log_inv(X):  # inverts a matrix of size up to 3x3, given its logscale values
    if (X.shape[0] != X.shape[1]):
        raise Exception("X is not a square matrix and cannot be inverted")

    if (X.shape[0] == 1):
        return matrix((-X[0, 0]))

    ldet = log_det(X)
    if isnan(ldet):  # '== nan' is always False; use isnan() to catch a singular X
        raise Exception(
            "The determinant of X is 0, cannot calculate the inverse")

    if (X.shape[0] == 2):  # X is a 2x2 matrix
        I = (-log_det(X)) * ones((2, 2))
        I[0, 0] += X[1, 1]
        I[0, 1] += X[0, 1] + complex(0, pi)
        I[1, 0] += X[1, 0] + complex(0, pi)
        I[1, 1] += X[0, 0]
        return I

    if (X.shape[0] == 3):  # X is a 3x3 matrix
        I = (-log_det(X)) * ones((3, 3))
        I[0, 0] += log_subt_exp(X[1, 1] + X[2, 2], X[1, 2] + X[2, 1])
        I[0, 1] += log_subt_exp(X[0, 2] + X[2, 1], X[0, 1] + X[2, 2])
        I[0, 2] += log_subt_exp(X[0, 1] + X[1, 2], X[0, 2] + X[1, 1])
        I[1, 0] += log_subt_exp(X[2, 0] + X[1, 2], X[1, 0] + X[2, 2])
        I[1, 1] += log_subt_exp(X[0, 0] + X[2, 2], X[0, 2] + X[2, 0])
        I[1, 2] += log_subt_exp(X[0, 2] + X[1, 0], X[0, 0] + X[1, 2])
        I[2, 0] += log_subt_exp(X[1, 0] + X[2, 1], X[2, 0] + X[1, 1])
        I[2, 1] += log_subt_exp(X[2, 0] + X[0, 1], X[0, 0] + X[2, 1])
        I[2, 2] += log_subt_exp(X[0, 0] + X[1, 1], X[0, 1] + X[1, 0])
        return I

    raise Exception("log_inv is only implemented for matrices of size < 4")
Code example #11
def plotInit(Plotting, Elements):
	if (Plotting == 2):
		loc = [i.xy for i in Elements]
		x = [i.real for i in loc]
		y = [i.imag for i in loc]
		x = list(sorted(set(x))) 
		x.remove(-10)
		y = list(sorted(set(y)))

		X, Y = pylab.meshgrid(x, y)
		U = pylab.ones(shape(X))
		V = pylab.ones(shape(Y))

		pylab.ion()
		fig, ax = pylab.subplots(1,1)
		graph = ax.quiver(X, Y, U, V)
		pylab.draw()
	else:
		pylab.ion()
		graph, = pylab.plot(1, 'ro', markersize = 2) 
		x = 2
		pylab.axis([-x,x,x,-x])

		graph.set_xdata(0)
		graph.set_ydata(0)
		pylab.draw()

	return graph
Code example #12
File: common.py Project: DanielEColi/fnatool
def filter2d(x, y, axes=['y'], algos=['2sigma']):
    """
    Perform 2D data filtration by the selected axes.
    In:
        x : ndarray, X vector
        y : ndarray, Y vector
        axes : list, names of the axes used to choose filtered values: 'x', 'y' or any combination
    Out:
        xnew : ndarray, filtered X
        ynew : ndarray, filtered Y
    """
    xnew = pl.array(x, dtype='float')
    ynew = pl.array(y, dtype='float')
    mask_x = pl.ones(len(x), dtype='bool')
    mask_y = pl.ones(len(y), dtype='bool')
    if 'y' in axes:
        mask_y = filter1d(y,algos=algos)        
    if 'x' in axes:
        mask_x = filter1d(x,algos=algos)
    mask = mask_x * mask_y
    xnew *= mask
    ynew *= mask
    
    xnew = pl.ma.masked_equal(xnew,0)
    xnew = pl.ma.compressed(xnew)
    ynew = pl.ma.masked_equal(ynew,0)
    ynew = pl.ma.compressed(ynew)

    assert pl.shape(xnew) == pl.shape(ynew)
    return xnew, ynew
Code example #13
File: boxplot_percentile.py Project: boada/scripts
def example():

    from pylab import rand, ones, concatenate
    import matplotlib.pyplot as plt
    # EXAMPLE data code from:
    # http://matplotlib.sourceforge.net/pyplots/boxplot_demo.py
    # fake up some data
    spread = rand(50) * 100
    center = ones(25) * 50
    flier_high = rand(10) * 100 + 100
    flier_low = rand(10) * -100
    data = concatenate((spread, center, flier_high, flier_low), 0)

    # fake up some more data
    spread = rand(50) * 100
    center = ones(25) * 40
    flier_high = rand(10) * 100 + 100
    flier_low = rand(10) * -100
    d2 = concatenate((spread, center, flier_high, flier_low), 0)
    data.shape = (-1, 1)
    d2.shape = (-1, 1)
    #data = [data, d2, d2[::2,0]]
    data = [data, d2]

    fig = plt.figure()
    ax = fig.add_subplot(1,1,1)
    ax.set_xlim(0,4)
    percentile_box_plot(ax, data, [2,3])
    plt.show()
Code example #14
def flow_gray(data):
    # grayscale for values strictly inside (0, 1)
    v = ((data > 0) *
         (data < 1)).reshape(list(data.shape) + [1]) * pylab.cm.gray(data)
    # paint zeros green and ones magenta; the RGBA vector is reshaped to
    # [1]*ndim + [4] so it broadcasts against the data.shape + (1,) masks
    v += (data == 0).reshape(list(data.shape) + [1]) * pylab.array(
        [0, 1., 0, 0]).reshape([1] * data.ndim + [4])
    v += (data == 1).reshape(list(data.shape) + [1]) * pylab.array(
        [1., 0, 1., 0]).reshape([1] * data.ndim + [4])
    return v
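The reshape to [1]*ndim + [4] lets a single RGBA color broadcast against a mask of shape data.shape + (1,); in miniature:

import numpy as np
mask = (np.arange(6).reshape(2, 3) == 0)            # shape (2, 3)
green = np.array([0, 1., 0, 0]).reshape([1, 1, 4])  # [1]*ndim + [4] for ndim=2
v = mask.reshape(2, 3, 1) * green                   # shape (2, 3, 4)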
Code example #15
 def __init__(self):
   self.ai = ones(NN.ni)
   self.ah = ones(NN.nh)
   self.ao = ones(NN.no)
   self.wi = zeros((NN.ni, NN.nh))
   self.wo = zeros((NN.nh, NN.no))
   randomizeMatrix(self.wi, -0.2, 0.2)
   randomizeMatrix(self.wo, -2.0, 2.0)
Code example #16
 def allocate(self,n):
     ni,ns,na            = self.dims
     vars_               = "cix ci gix gi gox go gfx gf"
     vars_               += " state output gierr gferr goerr cierr stateerr outerr"
     for v in vars_.split():
         setattr(self,v,nan*ones((n,ns)))
     self.source         = nan*ones((n,na))
     self.sourceerr      = nan*ones((n,na))
Code example #17
File: minilstm.py Project: tacohunter/ocropy
 def allocate(self, n):
     """Allocate space for the internal state variables.
     `n` is the maximum sequence length that can be processed."""
     ni, ns, na = self.dims
     vars = "cix ci gix gi gox go gfx gf"
     vars += " state output"
     for v in vars.split():
         setattr(self, v, nan * ones((n, ns)))
     self.source = nan * ones((n, na))
Code example #18
File: minilstm.py Project: dwohlfahrt/ocropy
 def allocate(self,n):
     """Allocate space for the internal state variables.
     `n` is the maximum sequence length that can be processed."""
     ni,ns,na = self.dims
     vars = "cix ci gix gi gox go gfx gf"
     vars += " state output"
     for v in vars.split():
         setattr(self,v,nan*ones((n,ns)))
     self.source = nan*ones((n,na))
Code example #19
 def forward(self, xs):
     n = len(xs)
     inputs, ys, zs = [None] * n, [None] * n, [None] * n
     for i in range(n):
         inputs[i] = concatenate([ones(1), xs[i]])
         ys[i] = sigmoid(dot(self.W1, inputs[i]))
         ys[i] = concatenate([ones(1), ys[i]])
         zs[i] = sigmoid(dot(self.W2, ys[i]))
     self.state = (inputs, ys, zs)
     return zs
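The concatenate([ones(1), ...]) calls prepend a constant 1 so that the first column of each weight matrix acts as a bias term. A self-contained illustration of the pattern (hypothetical layer sizes, NumPy only):

import numpy as np

def sigmoid(v):
    return 1.0 / (1.0 + np.exp(-v))

W1 = np.random.randn(4, 3 + 1)   # hidden x (bias + input)
W2 = np.random.randn(2, 4 + 1)   # output x (bias + hidden)
x = np.random.randn(3)
y = sigmoid(W1 @ np.concatenate([np.ones(1), x]))   # shape (4,)
z = sigmoid(W2 @ np.concatenate([np.ones(1), y]))   # shape (2,)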
Code example #20
File: info.py Project: JohanComparat/pyLPT
def getParamCovMat(prefix,dlogpower = 2, theoconstmult = 1.,dlogfilenames = ['dlogpnldloga.dat'],volume=256.**3,startki = 0, endki = 0, veff = [0.]):
    """
    Calculates parameter covariance matrix from the power spectrum covariance matrix and derivative term
    in the prefix directory
    """
    nparams = len(dlogfilenames)

    kpnl = M.load(prefix+'pnl.dat')
    k = kpnl[startki:,0]

    nk = len(k)
    if (endki == 0):
        endki = nk
        
    pnl = M.array(kpnl[startki:,1],M.Float64)
    covarwhole = M.load(prefix+'covar.dat')
    covar = covarwhole[startki:,startki:]
    if len(veff) > 1:
        sqrt_veff = M.sqrt(veff[startki:])
    else:
        sqrt_veff = M.sqrt(volume*M.ones(nk))

    dlogs = M.reshape(M.ones(nparams*nk,M.Float64),(nparams,nk))
    paramFishMat = M.reshape(M.zeros(nparams*nparams*(endki-startki),M.Float64),(nparams,nparams,endki-startki))
    paramCovMat = paramFishMat * 0.

    # Covariance matrices of dlog's
    for param in range(nparams):
        if len(dlogfilenames[param]) > 0:
            dlogs[param,:] = M.load(prefix+dlogfilenames[param])[startki:,1]

    normcovar = M.zeros(M.shape(covar),M.Float64)
    for i in range(nk):
        normcovar[i,:] = covar[i,:]/(pnl*pnl[i])

    M.save(prefix+'normcovar.dat',normcovar)

    f = k[1]/k[0]

    if (volume == -1.):
        volume = (M.pi/k[0])**3

    #theoconst = volume * k[1]**3 * f**(-1.5)/(12.*M.pi**2) #1 not 0 since we're starting at 1
    for ki in range(1,endki-startki):
        for p1 in range(nparams):
            for p2 in range(nparams):
                paramFishMat[p1,p2,ki] = M.sum(M.sum(\
                M.inverse(normcovar[:ki+1,:ki+1]) *
                M.outerproduct(dlogs[p1,:ki+1]*sqrt_veff[:ki+1],\
                               dlogs[p2,:ki+1]*sqrt_veff[:ki+1])))
                
                
        paramCovMat[:,:,ki] = M.inverse(paramFishMat[:,:,ki])

    return k[1:],paramCovMat[:,:,1:]
Code example #21
File: TeraData.py Project: DavidJahn86/terapy
 def getDR(self):
     #this function should return the dynamic range
     #this should be the noiselevel of the fft
     noiselevel=py.sqrt(py.mean(abs(py.fft(self._tdData.getAllPrecNoise()[0]))**2))
     #apply a moving average filter on log
     window_size=5
     window=py.ones(int(window_size))/float(window_size)
     hlog=py.convolve(20*py.log10(self.getFAbs()), window, 'valid')
     one=py.ones((2,))
     hlog=py.concatenate((hlog[0]*one,hlog,hlog[-1]*one))
     return hlog-20*py.log10(noiselevel)         
Code example #22
 def construct(self):
     # classic rain/sprinkler/grass network; G_obs (the observed data) is defined elsewhere
     R = pymc.Bernoulli('R', .6, value=pl.ones(self.obs))
     p_S = pymc.Lambda('p_S',
                       lambda R=R: pl.where(R, .005, .8),
                       doc='Pr[S|R]')
     S = pymc.Bernoulli('S', p_S, value=pl.ones(self.obs))
     p_G = pymc.Lambda('p_G',
                       lambda S=S, R=R: pl.where(S, pl.where(R, .99, .9),
                                                 pl.where(R, .8, 0.)),
                       doc='Pr[G|S,R]')
     G = pymc.Bernoulli('G', p_G, value=G_obs, observed=True)
Code example #23
File: plasma_coil_object.py Project: zchmlk/Coil-GUI
    def __init__(self,
                 r_floop=0.5,
                 z_floop=0.0,
                 i_p_coil_filename='hitpops.05.txt',
                 tris_filename='hitpops.05.t3d'):

        self.r_floop = r_floop
        self.z_floop = z_floop

        # read equilibrium file
        i_p_coils = P.loadtxt(i_p_coil_filename, delimiter=',', dtype=fdtype)
        self.i_p_coils = i_p_coils

        r_p_coils_full = i_p_coils[:, 0]
        z_p_coils_full = i_p_coils[:, 1]
        # ??? what is this scale factor, something to do with mu_0 ???
        beta = i_p_coils[:, 3] * 6.28e7
        i_p_coils_full = i_p_coils[:, 2]

        self.r_p_coils_full = r_p_coils_full
        self.z_p_coils_full = z_p_coils_full
        self.beta = beta
        self.i_p_coils_full = i_p_coils_full

        # choose subset where current is not zero

        sub = P.where(i_p_coils_full != 0.0)

        r_p_coils = r_p_coils_full[sub]
        z_p_coils = z_p_coils_full[sub]
        i_p_coils = i_p_coils_full[sub]
        n_p_coils = len(r_p_coils)

        self.r_p_coils = r_p_coils
        self.z_p_coils = z_p_coils
        self.i_p_coils = i_p_coils
        self.n_p_coils = n_p_coils

        r_p_widths = P.ones(n_p_coils, dtype=fdtype) * 0.05
        z_p_widths = 1.0 * r_p_widths
        n_r_p_filaments = P.ones(n_p_coils, dtype=idtype)
        n_z_p_filaments = 1 * n_r_p_filaments

        self.r_p_widths = r_p_widths
        self.z_p_widths = z_p_widths
        self.n_r_p_filaments = n_r_p_filaments
        self.n_z_p_filaments = n_z_p_filaments

        # read in triangle unstructured mesh information
        rzt, tris, pt = t3dinp(tris_filename)

        self.rzt = rzt
        self.tris = tris
        self.pt = pt
Code example #24
 def color_by_level(current_data):
     from pylab import vstack, contourf, plot, ones, arange, colorbar
     fs = current_data.framesoln
     pout, level = gridtools1.grid_output_1d(fs, 0, xout, return_level=True)
     Xout = vstack((xout, xout))
     Yout = vstack((-1.1 * ones(xout.shape), 1.1 * ones(xout.shape)))
     L = vstack((level, level))
     contourf(Xout, Yout, L, v_levels, colors=c_levels)
     cb = colorbar(ticks=range(1, maxlevels + 1))
     cb.set_label('AMR Level')
     plot(xout, pout, 'k')
Code example #25
def draw_domain(f, x0, y0, x1, y1):
    number_of_levels = 41
    n = 101
    x = pylab.linspace(x0, x1, n)
    y = pylab.linspace(y0, y1, n)
    xx = (pylab.reshape(x, (n, 1)) * pylab.ones(n)).transpose()
    yy = pylab.reshape(y, (n, 1)) * pylab.ones(n)
    z = f([xx, yy])
    pylab.ion()
    pylab.contour(x, y, z, number_of_levels)
    pylab.draw()
Code example #26
 def dewarp(self, img, cval=0, dtype=dtype('f')):
     assert img.shape == self.shape
     h, w = img.shape
     hpadding = self.r
     padded = vstack(
         [cval * ones((hpadding, w)), img, cval * ones((hpadding, w))])
     center = self.center + hpadding
     dewarped = [
         padded[center[i] - self.r:center[i] + self.r, i] for i in range(w)
     ]
     dewarped = array(dewarped, dtype=dtype).T
     return dewarped
Code example #27
 def X_obs(pi=pi, sigma=sigma, value=X):
     logp = mc.normal_like(pl.array(value).ravel(), 
                           (pl.ones([N,J*T])*pl.array(pi).ravel()).ravel(), 
                           (pl.ones([N,J*T])*pl.array(sigma).ravel()).ravel()**-2)
     return logp
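      # NOTE: nothing below this return ever executes; the alternative mixture
      # likelihood that follows is dead code preserved from the source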
     
     logp = pl.zeros(N)
     for n in range(N):
         logp[n] = mc.normal_like(pl.array(value[n]).ravel(),
                                  pl.array(pi+beta).ravel(),
                                  pl.array(sigma).ravel()**-2)
     return mc.flib.logsum(logp - pl.log(N))
Code example #28
File: plasma_coil_object.py Project: zchmlk/Coil-GUI
    def __init__(self, r_floop=0.5, z_floop=0.0,
                 i_p_coil_filename='hitpops.05.txt',
                 tris_filename='hitpops.05.t3d'):

        self.r_floop = r_floop
        self.z_floop = z_floop

        # read equilibrium file
        i_p_coils = P.loadtxt(i_p_coil_filename, delimiter=',', dtype=fdtype)
        self.i_p_coils = i_p_coils

        r_p_coils_full = i_p_coils[:, 0]
        z_p_coils_full = i_p_coils[:, 1]
        # ??? what is this scale factor, something to do with mu_0 ???
        beta = i_p_coils[:, 3] * 6.28e7
        i_p_coils_full = i_p_coils[:, 2]

        self.r_p_coils_full = r_p_coils_full
        self.z_p_coils_full = z_p_coils_full
        self.beta = beta
        self.i_p_coils_full = i_p_coils_full

        # choose subset where current is not zero

        sub = P.where(i_p_coils_full != 0.0)

        r_p_coils = r_p_coils_full[sub]
        z_p_coils = z_p_coils_full[sub]
        i_p_coils = i_p_coils_full[sub]
        n_p_coils = len(r_p_coils)

        self.r_p_coils = r_p_coils
        self.z_p_coils = z_p_coils
        self.i_p_coils = i_p_coils
        self.n_p_coils = n_p_coils

        r_p_widths = P.ones(n_p_coils, dtype=fdtype) * 0.05
        z_p_widths = 1.0 * r_p_widths
        n_r_p_filaments = P.ones(n_p_coils, dtype=idtype)
        n_z_p_filaments = 1 * n_r_p_filaments

        self.r_p_widths = r_p_widths
        self.z_p_widths = z_p_widths
        self.n_r_p_filaments = n_r_p_filaments
        self.n_z_p_filaments = n_z_p_filaments

        # read in triangle unstructured mesh information
        rzt, tris, pt = t3dinp(tris_filename)

        self.rzt = rzt
        self.tris = tris
        self.pt = pt
Code example #29
    def _istftm(self,
                X_hat=None,
                Phi_hat=None,
                pvoc=False,
                usewin=True,
                resamp=None):
        """
        :: 
            Inverse short-time Fourier transform magnitude. Make a signal from a |STFT| transform.
            Uses phases from self.STFT if Phi_hat is None.

            Inputs:
            X_hat - N/2+1 magnitude STFT [None=abs(self.STFT)]
            Phi_hat - N/2+1 phase STFT   [None=exp(1j*angle(self.STFT))]
            pvoc - whether to use phase vocoder [False]      
            usewin - whether to use overlap-add [False]

            Returns:
             x_hat - estimated signal
        """
        if not self._have_stft:
            return None
        X_hat = self.X if X_hat is None else P.np.abs(X_hat)
        if pvoc:
            self._pvoc(X_hat, Phi_hat, pvoc)
        else:
            Phi_hat = P.angle(self.STFT) if Phi_hat is None else Phi_hat
            self.X_hat = X_hat * P.exp(1j * Phi_hat)
        if usewin:
            if self.win is None:
                self.win = P.ones(
                    self.wfft) if self.window == 'rect' else P.np.sqrt(
                        P.hanning(self.wfft))
            if len(self.win) != self.nfft:
                self.win = P.r_[self.win, P.np.zeros(self.nfft - self.wfft)]
            if len(self.win) != self.nfft:
                error.BregmanError(
                    "features_base.Features._istftm(): assertion failed len(self.win)==self.nfft"
                )
        else:
            self.win = P.ones(self.nfft)
        if resamp:
            self.win = sig.resample(self.win,
                                    int(P.np.round(self.nfft * resamp)))
        fp = self._check_feature_params()
        self.x_hat = self._overlap_add(P.real(P.irfft(self.X_hat.T)),
                                       usewin=usewin,
                                       resamp=resamp)
        if self.verbosity:
            print("Extracted iSTFTM->self.x_hat")
        return self.x_hat
Code example #30
File: physicsClass.py Project: m4ntra/OOPSbackup
	def interpolate(self,x):
		if x.shape != (self.M.nelx,self.M.nelz):
			print(x.shape)
			print(self.M.nelx,self.M.nelz)
			raise Exception("The input design field does not match the shape expected by the model object")
		if not (self.pol in ['Ey','Hy']):
			raise ValueError('The polarisation has to be set to either Ey or Hy.')

		##Set material parameters depending on whether E or H field is solved for
		# Starting with the boundaries
		if self.pol == 'Ey': 
			self.AIn  = 1
			self.AOut = 1
			self.BIn  = _nToEps(self.nIn)
			self.BOut = _nToEps(self.nOut)
		elif self.pol == 'Hy':
			self.AIn  = 1/_nToEps(self.nIn)
			self.AOut = 1/_nToEps(self.nOut)
			self.BIn  = 1
			self.BOut = 1

		A = pl.ones(x.shape,dtype='complex')
		B = pl.ones(x.shape,dtype='complex')

		if self.interpolationType == 'materialBased':
			if self.pol == 'Ey': 
				for i in range(len(self.materials)):
					B[x==i] = _nToEps(self.materials[i])
			elif self.pol == 'Hy':
				for i in range(len(self.materials)):
					A[x==i] = 1./_nToEps(self.materials[i])

		elif self.interpolationType == 'inputBased':
			if self.pol == 'Ey': 
				B[:] = _nToEps(x)
			elif self.pol == 'Hy':
				A[:] = 1./_nToEps(x)
		else:
			raise ValueError('interpolationType has to be either materialBased or inputBased')

		if abs(A[:,0]-self.AIn).max() or abs(B[:,0]-self.BIn).max():
			warnings.warn("The material parameters at the top interface do not correspond "+
								"to the specified nIn. Results will probably be flawed",Warning)
		if abs(A[:,-1]-self.AOut).max() or abs(B[:,-1]-self.BOut).max():
			print(abs(A[:,-1]-self.AOut).max())
			print(abs(B[:,-1]-self.BOut).max())
			#print(B[:,-1])
			#print(self.BOut)
			warnings.warn("The material parameters at the bottom interface do not correspond "+
								"to the specified nOut. Results will probably be flawed",Warning)
		return A,B
Code example #31
File: models.py Project: afcarl/pymc-cod-correct
    def X_obs(pi=pi, sigma=sigma, value=X):
        logp = mc.normal_like(
            pl.array(value).ravel(),
            (pl.ones([N, J * T]) * pl.array(pi).ravel()).ravel(),
            (pl.ones([N, J * T]) * pl.array(sigma).ravel()).ravel()**-2)
        return logp

        logp = pl.zeros(N)
        for n in range(N):
            logp[n] = mc.normal_like(
                pl.array(value[n]).ravel(),
                pl.array(pi + beta).ravel(),
                pl.array(sigma).ravel()**-2)
        return mc.flib.logsum(logp - pl.log(N))
Code example #32
File: aufgabe3.py Project: mdbug/numerik
from numpy import ones
from scipy.sparse import spdiags

def system(m):
    n = m*m

    e = ones(n)
    l = ones(n)
    l[m-1::m] = 0.0
    r = ones(n)
    r[::m] = 0.0

    A = spdiags([-e, -l, 4.0*e, -r, -e], [-m, -1, 0, 1, m], n, n, format='csr')

    b = -e / float(n)

    return A, b
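Since A comes back in CSR form, the system solves directly with SciPy's sparse solver; a quick way to exercise the function (a sketch, assuming SciPy is installed):

from scipy.sparse.linalg import spsolve
A, b = system(10)    # 100x100 five-point Laplacian on a 10x10 grid
u = spsolve(A, b)    # discrete Poisson solve
print(u.shape)       # (100,)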
Code example #33
File: evaluate.py Project: woodshop/BregmanToolkit
    def evaluate(self,
                 seq_length=None,
                 query_duration=None,
                 tempo=1.0,
                 gt_only=True):
        """ 
        ::

            Evaluate loop over ground truth: query_duration varies with respect to tempo:
              query_duration - fractional seconds (requires adb.delta_time)
               OR seq_length - integer length of query sequence
              gt_only = if True, return only ground-truth results otherwise return full database results
        """
        if not tempo: tempo = 1.0
        seq_length = self.set_seq_length(seq_length, query_duration)
        lzt_keys, lzt_lengths = self.get_adb_lists()
        ranks = pylab.ones(
            (len(self.ground_truth), len(lzt_keys))) * float('inf')
        dists = pylab.ones(
            (len(self.ground_truth), len(lzt_keys))) * float('inf')
        gt_list, gt_orig = self.initialize_search(seq_length, tempo)
        gt_orig_keys, gt_orig_lengths = zip(*gt_orig)
        gt_keys, gt_lengths = zip(*gt_list)

        # Loop over ground truth keys
        self.adb.configQuery['seqLength'] = seq_length
        for i, q in enumerate(gt_keys):
            # Search
            if tempo == 1.0:
                res = self.adb.query(key=q).rawData
            else:
                res = audiodb.adb.tempo_search(db=self.adb, Key=q, tempo=tempo)
            r_keys, r_dists, q_pos, r_pos = zip(*res)
            q_idx = gt_orig_keys.index(q)
            for r_idx, s in enumerate(lzt_keys):
                try:
                    k = r_keys.index(s)
                    ranks[q_idx][r_idx] = k
                    dists[q_idx][r_idx] = r_dists[k]
                except ValueError:
                    # print "Warning: adb key ", s, "not found in result."
                    pass
        self.ranks = ranks
        self.dists = dists
        if gt_only:
            ranks, dists = self.reduce_evaluation_to_gt(
                ranks, dists, query_duration=query_duration)
        return ranks, dists
Code example #34
File: avkm_server.py Project: kghose/gobesh
def gabor_patch(
        sigma_deg=2,
        radius_deg=6,
        px_deg=50,
        sf_cyc_deg=2,
        phase_deg=0,  #phase of cosine in degrees
        contrast=1.0):
    """Return a gabor patch texture of the given dimensions and parameters."""

    height = width = radius_deg * px_deg
    x = pylab.linspace(-radius_deg, radius_deg, width)
    X, Y = pylab.meshgrid(x, x)
    L = pylab.exp(-(X**2 + Y**2) / sigma_deg**2)  #gaussian envelope
    #use around to round towards zero, otherwise you will get banding artifacts
    #dtype must be int for proper conversion to int and init of image data
    #I = pylab.array(-pylab.zeros(X.size)*max_range + neutral_gray, dtype='int')
    I = pylab.array(pylab.around(
        contrast * pylab.cos(2 * pylab.pi *
                             (sf_cyc_deg) * X + phase_deg * pylab.pi / 180.) *
        L * max_range) + neutral_gray,
                    dtype='int').ravel()
    IA = pylab.ones(I.size * 2, dtype='int') * 255
    IA[:-1:2] = I  #Need alpha=255 otherwise image is mixed with background
    #Data format for image http://www.pyglet.org/doc/programming_guide/accessing_or_providing_pixel_data.html
    data = array.array('B', IA)
    gabor = pyglet.image.ImageData(width, height, 'IA', data.tostring())
    return gabor
Code example #35
File: evaluate.py Project: BinRoot/BregmanToolkit
    def rank_by_distance_bhatt(self, qkeys, ikeys, rkeys, dists):
        """
        ::

            Reduce timbre-channel distances to ranks list by ground-truth key indices
            Bhattacharyya distance on timbre-channel probabilities and Kullback distances
        """
        # timbre-channel search using pre-computed distances
        ranks_list = []
        t_keys, t_lens = self.get_adb_lists(0) 
        rdists=pylab.ones(len(t_keys))*float('inf')
        qk = self._get_probs_tc(qkeys)
        for i in range(len(ikeys[0])): # number of include keys
            ikey=[]
            dk = pylab.zeros(self.timbre_channels)
            for t_chan in range(self.timbre_channels): # timbre channels
                ikey.append(ikeys[t_chan][i])
                try: 
                    # find dist of key i for query
                    i_idx = rkeys[t_chan].index( ikey[t_chan] ) # dataset include-key match
                    # the reduced distance function in include_keys order
                    # distance is Bhattacharyya distance on probs and dists
                    dk[t_chan] = dists[t_chan][i_idx]
                except:
                    print "Key not found in result list: ", ikey, "for query:", qkeys[t_chan]
                    raise error.BregmanError()
            rk = self._get_probs_tc(ikey)
            a_idx = t_keys.index( ikey[0] ) # audiodb include-key index
            rdists[a_idx] = distance.bhatt(pylab.sqrt(pylab.absolute(dk)), pylab.sqrt(pylab.absolute(qk*rk)))
        #search for the index of the relevant keys
        rdists = pylab.absolute(rdists)
        sort_idx = pylab.argsort(rdists)   # Sort fields into database order
        for r in self.ground_truth: # relevant keys
            ranks_list.append(pylab.where(sort_idx==r)[0][0]) # Rank of the relevant key
        return ranks_list, rdists
Code example #36
    def _istftm(self, X_hat=None, Phi_hat=None, pvoc=False, usewin=True, resamp=None):
        """
        :: 
            Inverse short-time Fourier transform magnitude. Make a signal from a |STFT| transform.
            Uses phases from self.STFT if Phi_hat is None.

            Inputs:
            X_hat - N/2+1 magnitude STFT [None=abs(self.STFT)]
            Phi_hat - N/2+1 phase STFT   [None=exp(1j*angle(self.STFT))]
            pvoc - whether to use phase vocoder [False]      
            usewin - whether to use overlap-add [False]

            Returns:
             x_hat - estimated signal
        """
        if not self._have_stft:
            return None
        X_hat = P.np.abs(self.STFT) if X_hat is None else P.np.abs(X_hat)
        if pvoc:
            self._pvoc(X_hat, Phi_hat, pvoc)
        else:
            Phi_hat = P.angle(self.STFT) if Phi_hat is None else Phi_hat
            self.X_hat = X_hat *  P.exp( 1j * Phi_hat )
        if usewin:
            self.win = P.hanning(self.nfft) 
            self.win *= 1.0 / ((float(self.nfft)*(self.win**2).sum())/self.nhop)
        else:
            self.win = P.ones(self.nfft)
        if resamp:
            self.win = sig.resample(self.win, int(P.np.round(self.nfft * resamp)))
        fp = self._check_feature_params()
        self.x_hat = self._overlap_add(P.real(self.nfft * P.irfft(self.X_hat.T)), usewin=usewin, resamp=resamp)
        if self.verbosity:
            print "Extracted iSTFTM->self.x_hat"        
        return self.x_hat
Code example #37
def main():
    """
    This shows the use of SynChan with Izhikevich neuron. This can be
    used for creating a network of Izhikevich neurons.
    """
    
    simtime = 200.0
    stepsize = 10.0
    model_dict = make_model()
    vm, inject, gk, spike = setup_data_recording(model_dict['neuron'],
                                          model_dict['pulse'],
                                          model_dict['synapse'],
                                          model_dict['spike_in'])
    mutils.setDefaultDt(elecdt=0.01, plotdt2=0.25)
    mutils.assignDefaultTicks(solver='ee')
    moose.reinit()
    mutils.stepRun(simtime, stepsize)
    pylab.subplot(411)
    pylab.plot(pylab.linspace(0, simtime, len(vm.vector)), vm.vector, label='Vm (mV)')
    pylab.legend()
    pylab.subplot(412)
    pylab.plot(pylab.linspace(0, simtime, len(inject.vector)), inject.vector, label='Inject (uA)')
    pylab.legend()
    pylab.subplot(413)
    pylab.plot(spike.vector, pylab.ones(len(spike.vector)), '|', label='input spike times')
    pylab.legend()
    pylab.subplot(414)
    pylab.plot(pylab.linspace(0, simtime, len(gk.vector)), gk.vector, label='Gk (mS)')
    pylab.legend()
    pylab.show()
Code example #38
 def _stft(self):
     if not self._have_x:
         print(
             "Error: You need to load a sound file first: use self.load_audio('filename.wav')"
         )
         return False
     fp = self._check_feature_params()
     num_frames = len(self.x)
     self.STFT = P.zeros((int(self.nfft / 2 + 1), num_frames),
                         dtype='complex')
     self.win = P.ones(self.wfft) if self.window == 'rect' else P.np.sqrt(
         P.hanning(self.wfft))
     x = P.zeros(self.wfft)
     buf_frames = 0
     for k, nex in enumerate(self.x):
         x = self._shift_insert(x, nex, self.nhop)
         # align buffer on start of audio
         if self.nhop >= self.wfft - k * self.nhop:
             self.STFT[:, k - buf_frames] = P.rfft(self.win * x,
                                                   self.nfft).T
         else:
             buf_frames += 1
     self.STFT = self.STFT / self.nfft
     self._fftfrqs = P.arange(
         0, self.nfft / 2 + 1) * self.sample_rate / float(self.nfft)
     self._have_stft = True
     if self.verbosity:
         print("Extracted STFT: nfft=%d, hop=%d" % (self.nfft, self.nhop))
     self.inverse = self._istftm
     self.X = abs(self.STFT)
     if not self.magnitude:
         self.X = self.X**2
     return True
Code example #39
 def get_dummy_Map(self):
     self.__init__()
     segment_number = 1
     segment = 0xFFFF*ones(11520, dtype = ushort)
     return struct.pack('>11524H',self.generation_date, self.generation_time,
                        self.number_of_segments, segment_number, *segment)
     
Code example #40
File: evaluate.py Project: ctlabvn/BregmanToolkit
    def rank_by_distance_bhatt(self, qkeys, ikeys, rkeys, dists):
        """
        ::

            Reduce timbre-channel distances to ranks list by ground-truth key indices
            Bhattacharyya distance on timbre-channel probabilities and Kullback distances
        """
        # timbre-channel search using pre-computed distances
        ranks_list = []
        t_keys, t_lens = self.get_adb_lists(0) 
        rdists=pylab.ones(len(t_keys))*float('inf')
        qk = self._get_probs_tc(qkeys)
        for i in range(len(ikeys[0])): # number of include keys
            ikey=[]
            dk = pylab.zeros(self.timbre_channels)
            for t_chan in range(self.timbre_channels): # timbre channels
                ikey.append(ikeys[t_chan][i])
                try: 
                    # find dist of key i for query
                    i_idx = rkeys[t_chan].index( ikey[t_chan] ) # dataset include-key match
                    # the reduced distance function in include_keys order
                    # distance is Bhattacharyya distance on probs and dists
                    dk[t_chan] = dists[t_chan][i_idx]
                except:
                    print("Key not found in result list: ", ikey, "for query:", qkeys[t_chan])
                    raise error.BregmanError()
            rk = self._get_probs_tc(ikey)
            a_idx = t_keys.index( ikey[0] ) # audiodb include-key index
            rdists[a_idx] = distance.bhatt(pylab.sqrt(pylab.absolute(dk)), pylab.sqrt(pylab.absolute(qk*rk)))
        #search for the index of the relevant keys
        rdists = pylab.absolute(rdists)
        sort_idx = pylab.argsort(rdists)   # Sort fields into database order
        for r in self.ground_truth: # relevant keys
            ranks_list.append(pylab.where(sort_idx==r)[0][0]) # Rank of the relevant key
        return ranks_list, rdists
Code example #41
File: evaluate.py Project: ctlabvn/BregmanToolkit
    def rank_by_distance_avg(self, qkeys, ikeys, rkeys, dists):
        """
        ::

            Reduce timbre-channel distances to ranks list by ground-truth key indices
            Kullback distances
        """
        # timbre-channel search using pre-computed distances
        ranks_list = []
        t_keys, t_lens = self.get_adb_lists(0) 
        rdists=pylab.ones(len(t_keys))*float('inf')
        for t_chan in range(self.timbre_channels): # timbre channels
            t_keys, t_lens = self.get_adb_lists(t_chan) 
            for i, ikey in enumerate(ikeys[t_chan]): # include keys, results
                try: 
                    # find dist of key i for query
                    i_idx = rkeys[t_chan].index( ikey ) # lower_bounded include-key index
                    a_idx = t_keys.index( ikey ) # audiodb include-key index
                    # the reduced distance function in include_keys order
                    # distance is the sum for now
                    if t_chan:
                        rdists[a_idx] += dists[t_chan][i_idx]
                    else:
                        rdists[a_idx] = dists[t_chan][i_idx]
                except:
                    print("Key not found in result list: ", ikey, "for query:", qkeys[t_chan])
                    raise error.BregmanError()
        #search for the index of the relevant keys
        rdists = pylab.absolute(rdists)
        sort_idx = pylab.argsort(rdists)   # Sort fields into database order
        for r in self.ground_truth: # relevant keys
            ranks_list.append(pylab.where(sort_idx==r)[0][0]) # Rank of the relevant key
        return ranks_list, rdists
Code example #42
File: stats.py Project: jeronimozc/neurapy
def bin_confint_lookup(pc, nsamp, ci=.05):
    """Return the confidence interval from the lookup table.
  Inputs:
    pc - array (get back several cis) or single value (get back one ci) of percent corrects
    nsamp - number of trials used to obtain each pc
    ci - confidence level (e.g. 0.01, 0.05)

  Output:
    3xN array - first row is pc
                last two rows are lower and upper ci as expected by pylab.errorbar
  """
    points = ci_table['points']
    values_lo = ci_table['values_lo']
    values_high = ci_table['values_high']

    from scipy.interpolate import griddata
    if pylab.isscalar(pc):
        pc = pylab.array([pc])
        nsamp = pylab.array([nsamp])
    ci_a = pylab.ones(pc.size) * ci
    xi = pylab.array((pc, nsamp, ci_a)).T

    low_ci = griddata(points, values_lo, xi, method='linear')
    high_ci = griddata(points, values_high, xi, method='linear')

    return pylab.array((pc, low_ci, high_ci))
Code example #43
def define_model(data):
    # Builds model object
    n = len(data)
    variable_names = ['g', 'sigma_g', 'p_type', 'T']
    known_params = {'sigma_z_g': sigma_z_g}
    hyper_params = {'prior_mu_g': 0*ones(n),
                    'prior_cov_g': 100*eye(n),
                    'alpha_type': (1., 1.),
                    'a_g': 3.,
                    'b_g': 1.}
    priors = {'sigma_g': stats.invgamma(hyper_params['a_g'], scale=hyper_params['b_g']),
              'p_type': dirichlet(hyper_params['alpha_type']),
              'T': iid_dist(categorical((1., 1.)), n),
              'g': mvnorm(hyper_params['prior_mu_g'], hyper_params['prior_cov_g'])}
    #initials = {'g': g[:n],
    #            'sigma_g': sigma_g}
    FCP_samplers = {'g': ground_height_step(),
                    'p_type': p_type_step(),
                    'T': type_step(),
                    'sigma_g': sigma_ground_step()}

    model = Model()
    model.set_variable_names(variable_names)
    model.set_known_params(known_params)
    model.set_hyper_params(hyper_params)
    model.set_priors(priors)
    #model.set_initials(initials)
    model.set_FCP_samplers(FCP_samplers)
    model.set_data(data)

    return model
Code example #44
File: opacity.py Project: keflavich/casa
def atm(freqGHz,temp,humi,press,height):
  """Use the ATM model in CASA to calculate the opacity given the
  surface weather data and frequency. Temperature should be in Kelvin,
  pressure should be actual surface pressure in mbar (not adjusted to equivalent
  sea level pressure) and height should be in meters above mean sea level"""
  tmp = qa.quantity(temp, 'K')
  pre = qa.quantity(press, 'mbar')
  hum = humi
  alt = qa.quantity(height, 'm')
  h0  = qa.quantity(1.54, 'km')
  wvl = qa.quantity(-6.5, 'K/km')
  mxA = qa.quantity(10, 'km')
  dpr = qa.quantity(10.0, 'mbar')
  dpm = 1.2
  att = 3 # 3 = mid lat, winter
  myatm = at.initAtmProfile(alt, tmp, pre, mxA, hum, wvl, dpr, dpm, h0, att)

  # set spectral range to compute values for
  nb = len(freqGHz)
  fC = qa.quantity(freqGHz, 'GHz')
  fW = qa.quantity(pl.ones(nb), 'GHz')
  fR = qa.quantity(pl.zeros(nb), 'GHz')
  at.initSpectralWindow(nb, fC, fW, fR)
  fr=pl.zeros(nb)
  op=pl.zeros(nb)
  for i in range(nb):
    fr[i] = at.getSpectralWindow(i)['value']/1e9
    op[i] = at.getDryOpacitySpec(i)[1]+at.getWetOpacitySpec()[1]['value']
  return (fr,op)
Code example #45
File: moviemaker.py Project: kghose/neurapy
def parse_task_object_data(bhv):
  """Convert all the objects into image data and parse their initial positions."""
  obj_data = bhv['Stimuli']['Pic'] #Only handling pics now
  obj_r = re.compile("(\w+)\(") #Regexp to find task object description
  args_r = re.compile("([-.\w]+)[,\)]")#Regexp to extract arguments
  to = bhv['TaskObject']

  objects = []
  initial_pos = []
  for n in xrange(len(to)):
    oname = obj_r.findall(to[n][0])[0]
    if oname == 'fix':
      odata = pylab.ones((5,5,3),dtype=float)#Arbitrary square for FP
      args = args_r.findall(to[n][0])
      p = [float(p) for p in args]
    elif oname =='pic':
      args = args_r.findall(to[n][0])
      picname = args[0] #First one is object name
      p = [float(p) for p in args[1:]]
      for oidx in xrange(len(obj_data)):
        if obj_data[oidx]['Name'] == picname:
          odata = obj_data[oidx]['Data']/255.0 #matplotlib needs [0,1]
          break
    else:
      odata = pylab.zeros((4,4,3))
      logger.error('Could not find object')

    objects.append(odata)
    initial_pos.append(p)

  return objects, pylab.array(initial_pos)
Code example #46
File: datagen.py Project: albert4git/aTest
def datagen(N):
    """
    Produces N pairs of training data and desired output;
    each sample of training data contains -1 in its first position,
    this corresponds to the interpretation of the threshold as first
    element of the weight vector
    """

    fun1 = lambda x1,x2: -2*x1**3-x2+.5*x1**2
    fun2 = lambda x1,x2: x1**2*x2+2*x1*x2+1
    fun3 = lambda x1,x2: .5*x1*x2**2+x2**2-2*x1**2
    
    rarr1 = rand(1,N)
    rarr2 = rand(1,N)
    
    teacher = sign(rand(1,N)-.5)
    
    idplus  = (teacher<0)
    idminus = ~idplus  # logical NOT; unary minus on a boolean array is an error in modern NumPy
    
    rarr1[idplus] = rarr1[idplus]-1
    
    y1=fun1(rarr1,rarr2)
    y2=fun2(rarr1,rarr2)
    y3=fun3(rarr1,rarr2)
    
    x=transpose(concatenate((-ones((1,N)),y1,y2)))
    
    return x, teacher[0]
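For reference, the shapes produced (with the pylab imports rand, sign, ones, concatenate, transpose in scope):

x, teacher = datagen(100)
print(x.shape)        # (100, 3): each row is [-1, y1, y2]
print(teacher.shape)  # (100,): labels in {-1.0, 1.0}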
Code example #47
File: stats.py Project: mGolos/OldCodeSamples
def random2HemiPermutations(mat):
    """Return a matrix with elements of a triangle permuted without saving the degrees.
    """
    s = mat.shape
    N = s[0]
    N2 = N // 2
    L = N * (N - 2) // 8
    nruter = array(mat)

    Sind = triSup(
        arange(N2 * N2).reshape((N2, N2)) +
        arange(0, N2 * N2, N2).reshape((N2, 1)))
    Mind = (arange(N2 * N2).reshape(
        (N2, N2)) + arange(N2, N2 * N2 + 1, N2).reshape(
            (N2, 1))).reshape(N2 * N2)
    Iind = triSup(ones((N, N)), ind=1)[-L:]
    Salea = permutation(Sind)
    Malea = permutation(Mind)
    Ialea = permutation(Iind)

    nruter[unravel_index(Sind, s)] = nruter[unravel_index(Salea, s)]
    nruter[unravel_index(Mind, s)] = nruter[unravel_index(Malea, s)]
    nruter[unravel_index(Iind, s)] = nruter[unravel_index(Ialea, s)]
    for i in range(s[0]):
        nruter[i:, i] = nruter[i, i:]

    return nruter
Code example #48
File: evaluate.py Project: BinRoot/BregmanToolkit
    def rank_by_distance_avg(self, qkeys, ikeys, rkeys, dists):
        """
        ::

            Reduce timbre-channel distances to ranks list by ground-truth key indices
            Kullback distances
        """
        # timbre-channel search using pre-computed distances
        ranks_list = []
        t_keys, t_lens = self.get_adb_lists(0) 
        rdists=pylab.ones(len(t_keys))*float('inf')
        for t_chan in range(self.timbre_channels): # timbre channels
            t_keys, t_lens = self.get_adb_lists(t_chan) 
            for i, ikey in enumerate(ikeys[t_chan]): # include keys, results
                try: 
                    # find dist of key i for query
                    i_idx = rkeys[t_chan].index( ikey ) # lower_bounded include-key index
                    a_idx = t_keys.index( ikey ) # audiodb include-key index
                    # the reduced distance function in include_keys order
                    # distance is the sum for now
                    if t_chan:
                        rdists[a_idx] += dists[t_chan][i_idx]
                    else:
                        rdists[a_idx] = dists[t_chan][i_idx]
                except:
                    print "Key not found in result list: ", ikey, "for query:", qkeys[t_chan]
                    raise error.BregmanError()
        #search for the index of the relevant keys
        rdists = pylab.absolute(rdists)
        sort_idx = pylab.argsort(rdists)   # Sort fields into database order
        for r in self.ground_truth: # relevant keys
            ranks_list.append(pylab.where(sort_idx==r)[0][0]) # Rank of the relevant key
        return ranks_list, rdists
Code example #49
File: test_covariates.py Project: aflaxman/gbd
def test_covariate_model_dispersion():
    # simulate normal data
    n = 100

    model = data.ModelData()
    model.hierarchy, model.output_template = data_simulation.small_output()

    Z = mc.rcategorical([.5, 5.], n)
    zeta_true = -.2

    pi_true = .1
    ess = 10000.*pl.ones(n)
    eta_true = pl.log(50)
    delta_true = 50 + pl.exp(eta_true)

    p = mc.rnegative_binomial(pi_true*ess, delta_true*pl.exp(Z*zeta_true)) / ess

    
    model.input_data = pandas.DataFrame(dict(value=p, z_0=Z))
    model.input_data['area'] = 'all'
    model.input_data['sex'] = 'total'
    model.input_data['year_start'] = 2000
    model.input_data['year_end'] = 2000



    # create model and priors
    vars = dict(mu=mc.Uninformative('mu_test', value=pi_true))
    vars.update(covariate_model.mean_covariate_model('test', vars['mu'], model.input_data, {}, model, 'all', 'total', 'all'))
    vars.update(covariate_model.dispersion_covariate_model('test', model.input_data, .1, 10.))
    vars.update(rate_model.neg_binom_model('test', vars['pi'], vars['delta'], p, ess))

    # fit model
    m = mc.MCMC(vars)
    m.sample(2)
Code example #50
File: plot_tuneshift.py Project: like2000/Pyheana
def plot_tuneshifts_2(ax, spectrum, scan_values, Qs=1, fitrange=None, fittype=None):

    (spectral_lines, spectral_intensity) = spectrum

    # Normalize power.
    normalized_intensity = spectral_intensity / plt.amax(spectral_intensity)

    # Prepare plot environment.
    palette    = _create_cropped_cmap()

    x_grid = plt.ones(spectral_lines.shape) * plt.array(scan_values, dtype='float64')
    for file_i in xrange(len(scan_values)):
        x, y, z = x_grid[:,file_i], spectral_lines[:,file_i], normalized_intensity[:,file_i]
        tuneshift_plot = ax[0].scatter(x, y, s=192*plt.log(1+z), c=z, cmap=palette, edgecolors='None')

    # Colorbar
    cb = plt.colorbar(tuneshift_plot, ax[1], orientation='vertical')
    cb.set_label('Power [normalised]')

    if fitrange:
        if not fittype or fittype=='full':
            x, y, z, p = fit_modes_full(spectral_lines, spectral_intensity, scan_values, fitrange)
        elif fittype=="0":
            x, y, z, p = fit_modes_0(spectral_lines, spectral_intensity, scan_values, fitrange)
        else:
            raise ValueError("Wrong argument "+fittype+"! Use \"0\" or \"full\"")

        ax[0].plot(x, y, 'o', ms=8, mfc='none', mew=2, mec='limegreen')
        ax[0].plot(scan_values, z, '-', lw=2, color='limegreen')

        ax[0].text(0.95, 0.95, '$\Delta Q \sim $ {:1.2e}'.format(p[0]*1e11*Qs[0]), fontsize=36, color='w', horizontalalignment='right', verticalalignment='top', transform=ax[0].transAxes)
Code example #51
File: testConj.py Project: wampixel/nim-games
def createTable(maxC=5, maxL=5) :
    i = 0
    li = []
    for j in range(0, maxL) :
        li.append(ones(maxC))

    return array(li)
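The loop fills a (maxL, maxC) array of ones row by row; a vectorized equivalent, for comparison:

from numpy import ones

def createTable(maxC=5, maxL=5):
    return ones((maxL, maxC))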
Code example #52
def psp_parameter_estimate_fixmem(time, value):
    smoothing_kernel = 10
    smoothed_value = p.convolve(
        value,
        p.ones(smoothing_kernel) / float(smoothing_kernel),
        "same")

    mean_est_part = int(len(value) * .1)
    mean_estimate = p.mean(smoothed_value[-mean_est_part:])
    noise_estimate = p.std(value[-mean_est_part:])

    integral = p.sum(smoothed_value - mean_estimate) * (time[1] - time[0])

    f = 1.

    A_estimate = (max(smoothed_value) - mean_estimate) / (1. / 4.)

    min_A = noise_estimate

    if A_estimate < min_A:
        A_estimate = min_A

    t1_est = integral / A_estimate * f
    t2_est = 2 * t1_est

    tmax_est = time[p.argmax(smoothed_value)] + p.log(t2_est / t1_est) * (t1_est * t2_est) / (t1_est - t2_est)

    return p.array([
        tmax_est,
        A_estimate,
        t1_est,
        mean_estimate])
Code example #53
def Ablock(m):
    # Blockmatrix fuerr 2d-Laplace
    n = m * m

    e = ones(n)
    l = ones(n)
    l[m - 1::m] = 0.0
    r = ones(n)
    r[::m] = 0.0

    A = spdiags([-e, -l, 4.0 * e, -r, -e], [-m, -1, 0, 1, m],
                n,
                n,
                format='csr')

    return A.toarray()
Code example #54
    def __convertToFloats__(self, signal, annotation, time):
        """
        method converts all string values in signal, annotation arrays
        into float values;
        here is one assumption: time array is in float format already
        """
        floats = pl.ones(len(signal))
        if annotation is None:  # 'is None' avoids NumPy's elementwise == comparison
            entities = zip(signal)
        else:
            entities = zip(signal, annotation)
        for idx, values in enumerate(entities):
            for value in values:
                try:
                    pl.float64(value)  # check if it can be converted to float
                except ValueError:
                    floats[idx] = 0  # the value is NOT like float type
                    break

        true_floats = pl.nonzero(floats)  # get indexes of non-zero positions
        signal = signal[true_floats].astype(float)
        if annotation is not None:
            annotation = annotation[true_floats].astype(float)
        if time is not None:
            time = time[true_floats]

        return signal, annotation, time
Code example #56
 def _stft(self):
     if not self._have_x:
         print "Error: You need to load a sound file first: use self.load_audio('filename.wav')"
         return False
     fp = self._check_feature_params()
     num_frames = len(self.x)
     self.STFT = P.zeros((self.nfft/2+1, num_frames), dtype='complex')
     self.win = P.ones(self.wfft) if self.window=='rect' else P.np.sqrt(P.hanning(self.wfft))
     x = P.zeros(self.wfft)
     buf_frames = 0
     for k, nex in enumerate(self.x):
         x = self._shift_insert(x, nex, self.nhop)
         if self.nhop >= self.wfft - k*self.nhop : # align buffer on start of audio
             self.STFT[:,k-buf_frames]=P.rfft(self.win*x, self.nfft).T 
         else:
             buf_frames+=1
     self.STFT = self.STFT / self.nfft
     self._fftfrqs = P.arange(0,self.nfft/2+1) * self.sample_rate/float(self.nfft)
     self._have_stft=True
     if self.verbosity:
         print "Extracted STFT: nfft=%d, hop=%d" %(self.nfft, self.nhop)
     self.inverse=self._istftm
     self.X = abs(self.STFT)
     if not self.magnitude:
         self.X = self.X**2
     return True
Code example #57
File: utils.py Project: astrofanlee/project_TL
 def int(self, integrand):
     """
     Integrates over second argument of an array
     with Gaussian Quadrature weights
     """
     return M.sum(M.outer(1.*M.ones(integrand.shape[0]),self.weights) * \
                integrand,1)
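The M.outer(ones, weights) product tiles the quadrature weights across every row before the weighted sum. A self-contained sketch of the same reduction with NumPy's Gauss-Legendre weights (assuming self.weights holds such weights on the integrand's grid):

import numpy as np

nodes, weights = np.polynomial.legendre.leggauss(16)
rows = np.vstack([np.sin(nodes) ** 2, np.cos(nodes) ** 2])   # two integrands
vals = np.sum(np.outer(np.ones(rows.shape[0]), weights) * rows, 1)
# vals[0] ~ 1 - sin(2)/2, vals[1] ~ 1 + sin(2)/2 (integrals over [-1, 1])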
Code example #58
def define_model(data):
    # Builds model object
    n = len(data)
    variable_names = ['g', 'sigma_g']
    known_params = {'sigma_z_g': sigma_z_g, 'T': ones(n)}
    hyper_params = {
        'prior_mu_g': 0 + zeros(n),
        'prior_cov_g': 100 * eye(n),
        'a_g': 0.,
        'b_g': 0.
    }
    priors = {
        'sigma_g': stats.invgamma(hyper_params['a_g'],
                                  scale=hyper_params['b_g']),
        'g': mvnorm(hyper_params['prior_mu_g'], hyper_params['prior_cov_g'])
    }
    initials = {'g': g[:n], 'sigma_g': sigma_g}
    FCP_samplers = {'g': ground_height_step(), 'sigma_g': sigma_ground_step()}

    model = Model()
    model.set_variable_names(variable_names)
    model.set_known_params(known_params)
    model.set_hyper_params(hyper_params)
    model.set_priors(priors)
    model.set_initials(initials)
    model.set_FCP_samplers(FCP_samplers)
    model.set_data(data)

    return model
Code example #59
def mlab_imshowColor(im, alpha=255, **kwargs):
    """
    Plot a color image with mayavi.mlab.imshow.
    im is a ndarray with dim (n, m, 3) and scale (0->255]
    alpha is a single number or a ndarray with dim (n*m) and scale (0->255]
    **kwargs is passed onto mayavi.mlab.imshow(..., **kwargs)
    """
    try:
        alpha[0]
    except (TypeError, IndexError):  # alpha is a scalar, not an array
        alpha = pl.ones(im.shape[0] * im.shape[1]) * alpha
    if len(alpha.shape) != 1:
        alpha = alpha.flatten()

    # The lut is a Nx4 array, with the columns representing RGBA
    # (red, green, blue, alpha) coded with integers going from 0 to 255,
    # we create it by stacking all the pixels (r,g,b,alpha) as rows.
    myLut = pl.c_[im.reshape(-1, 3), alpha]
    myLutLookupArray = pl.arange(im.shape[0] * im.shape[1]).reshape(im.shape[0], im.shape[1])

    # We can display a color image by using mlab.imshow, a LUT color list and a LUT lookup table.
    theImshow = mlab.imshow(myLutLookupArray, colormap='binary', **kwargs) #temporary colormap
    theImshow.module_manager.scalar_lut_manager.lut.table = myLut
    mlab.draw()

    return theImshow