Example #1
def test_converting_to_factors():

    test_data = DataFrame(
        {
            'colA': Series(randn(1, 5000).flatten() > 0),
            'colB': Series(100 * randn(1, 5000).flatten()),
            'colC': Series(100 + randn(1, 5000).flatten()),
            'colD': Series(randn(1, 5000).flatten() > 0),
        },
    )

    test_data['colA'] = test_data['colA'].map(str)
    test_data['colD'] = test_data['colD'].map(str)

    factor_cols = [('colA', 'True'),
                   ('colD', 'True')]

    rpy_test_df = com.convert_to_r_dataframe(test_data)

    rpy_out_df = Rtools.convert_columns_to_factors(rpy_test_df, factor_cols)
    test_cols = [('colA', 'factor'),
                 ('colB', 'numeric'),
                 ('colC', 'numeric'),
                 ('colD', 'factor')]

    for col, typ in test_cols:
        if typ == 'factor':
            yield eq_, rpy_out_df.rx2(col).nlevels, 2
        elif typ == 'numeric':
            yield ok_, (not hasattr(rpy_out_df.rx2(col), 'nlevels'))
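A note on the random generator used throughout these examples: randn (aliased sp.randn, SP.randn, or scipy.randn below) is SciPy's old top-level re-export of numpy.random.randn, which has been deprecated and removed in recent SciPy releases. A minimal sketch of the equivalent modern NumPy draw for the columns above:

import numpy as np

rng = np.random.default_rng()
samples = rng.standard_normal(5000)   # replaces randn(1, 5000).flatten()
flags = (samples > 0).astype(str)     # 'True'/'False' strings like colA/colD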
Example #2
    def _genBgTerm_fromXX(self,vTot,vCommon,XX,a=None,c=None):
        """
        generate background term from SNPs

        Args:
            vTot: variance of Yc+Yi
            vCommon: variance of Yc
            XX: kinship matrix
            a: common scales; can be set for debugging purposes
            c: independent scales; can be set for debugging purposes
        """
        vSpecific = vTot-vCommon

        SP.random.seed(0)
        if c is None: c = SP.randn(self.P)
        XX += 1e-3 * SP.eye(XX.shape[0])
        L = LA.cholesky(XX,lower=True)

        # common effect
        R = self.genWeights(self.N,self.P)
        A = self.genTraitEffect()
        if a is not None: A[0,:] = a
        Yc = SP.dot(L,SP.dot(R,A))
        Yc*= SP.sqrt(vCommon)/SP.sqrt(Yc.var(0).mean())

        # specific effect
        R = SP.randn(self.N,self.P)
        Yi = SP.dot(L,SP.dot(R,SP.diag(c)))
        Yi*= SP.sqrt(vSpecific)/SP.sqrt(Yi.var(0).mean())

        return Yc, Yi
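The heart of _genBgTerm_fromXX is the Cholesky sampling trick: if XX = L L^T and Z has i.i.d. standard-normal entries, then L Z has row covariance XX. A minimal standalone sketch of that step, using plain NumPy/SciPy in place of the SP/LA aliases above:

import numpy as np
from scipy.linalg import cholesky

N, P = 100, 3
G = np.random.randn(N, 50)
XX = G.dot(G.T) / 50.0            # toy kinship-like PSD matrix
XX += 1e-3 * np.eye(N)            # same jitter as in the snippet above
L = cholesky(XX, lower=True)
Y = L.dot(np.random.randn(N, P))  # rows of Y are correlated according to XX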
Example #3
def test_mixed_model():

    test_data = DataFrame(
        {
            'colA': Series(randn(1, 5000).flatten() > 0),
            'colB': Series(100 * randn(1, 5000).flatten()),
            'colC': Series(100 + randn(1, 5000).flatten()),
            'colD': Series(randn(1, 5000).flatten() > 0),
            },
        )

    test_data['colA'] = test_data['colA'].map(str)
    test_data['colD'] = test_data['colD'].map(str)

    factor_cols = [('colA', 'True'),
                   ('colD', 'True')]

    rpy_test_df = com.convert_to_r_dataframe(test_data)
    rpy_test_df = Rtools.convert_columns_to_factors(rpy_test_df, factor_cols)

    base_formula = Formula('colC ~ as.factor(colA) + colB')
    rand_formula = Formula('~1|colD')

    results = Rtools.R_linear_mixed_effects_model(rpy_test_df, base_formula, rand_formula)

    print results['tTable']
    ok_(('tTable' in results), 'Did not have the tTable in the results')
    ok_(('as.factor(colA)False' in results['tTable'].index), 'Did not have the factor in the tTable')
    ok_(('colB' in results['tTable'].index), 'Did not have the variable in the tTable')
Example #4
 def test_far_apart_clusters_estimate_all(self):
     cluster1 = sp.randn(40,1000)
     cluster2 = sp.randn(40,1000) * 2
     cluster2[0,:] += 10
     clusterList1 = [cluster1[:,i]
                     for i in xrange(sp.size(cluster1,1))]
     clusterList2 = [cluster2[:,i]
                     for i in xrange(sp.size(cluster2,1))]
     total, pair = qa.overlap_fp_fn(
         {1: clusterList1, 2: clusterList2})
     self.assertLess(total[1][0], 1e-4)
     self.assertLess(total[1][1], 1e-4)
     self.assertLess(total[2][0], 1e-4)
     self.assertLess(total[2][1], 1e-4)
     self.assertLess(pair[1][2][0], 1e-4)
     self.assertLess(pair[1][2][1], 1e-4)
     self.assertLess(pair[2][1][0], 1e-4)
     self.assertLess(pair[2][1][1], 1e-4)
     self.assertGreater(total[1][0], 0.0)
     self.assertGreater(total[1][1], 0.0)
     self.assertGreater(total[2][0], 0.0)
     self.assertGreater(total[2][1], 0.0)
     self.assertGreater(pair[1][2][0], 0.0)
     self.assertGreater(pair[1][2][1], 0.0)
     self.assertGreater(pair[2][1][0], 0.0)
     self.assertGreater(pair[2][1][1], 0.0)
Example #5
 def simulate(self,standardize=True):
     self._update_cache()
     RV = SP.zeros((self.N,self.P))
     # region
     Z = SP.randn(self.S,self.P)
     Sc,Uc = LA.eigh(self.Cr.K())
     Sc[Sc<1e-9] = 0
     USh_c = Uc*Sc[SP.newaxis,:]**0.5 
     RV += SP.dot(SP.dot(self.Xr,Z),USh_c.T)
     # background
     Z = SP.randn(self.N,self.P)
     USh_r = self.cache['Lr'].T*self.cache['Srstar'][SP.newaxis,:]**0.5
     Sc,Uc = LA.eigh(self.Cg.K())
     Sc[Sc<1e-9] = 0
     USh_c = Uc*Sc[SP.newaxis,:]**0.5
     RV += SP.dot(SP.dot(USh_r,Z),USh_c.T)
     # noise
     Z = SP.randn(self.N,self.P)
     Sc,Uc = LA.eigh(self.Cn.K())
     Sc[Sc<1e-9] = 0
     USh_c = Uc*Sc[SP.newaxis,:]**0.5 
     RV += SP.dot(Z,USh_c.T)
     # standardize
     if standardize:
         RV-=RV.mean(0)
         RV/=RV.std(0) 
     return RV
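Each of the three terms above uses the same eigendecomposition square root: for a PSD matrix C = U diag(S) U^T, the matrix USh = U * S**0.5 satisfies USh.dot(USh.T) = C, so Z.dot(USh.T) yields samples whose columns have covariance C. A standalone sketch of that trick:

import numpy as np

C = np.cov(np.random.randn(4, 200))  # toy PSD column covariance
S, U = np.linalg.eigh(C)
S[S < 1e-9] = 0                      # clip numerical negatives, as above
USh = U * S[np.newaxis, :] ** 0.5
Z = np.random.randn(1000, 4)
Y = Z.dot(USh.T)                     # columns of Y have covariance ~ C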
Example #6
 def _generate_null_shift_errors(self):
     """
     Returns null shift (constant bias) entries in a 6 entry array:
        array([gx_n, gy_n, gz_n, ax_n, ay_n, az_n])
     
     where the subscript 'n' stands for 'null shift'.
     
     Note
     -----
      The constant bias is constant during a run, but varies from run to run.

      If, for experimental purposes, you'd like the SAME constant bias generated
      on every run, then this function should be modified to use the standard
      deviation value directly (not multiplied by a random number).
     """
     # If the null-shift value has been generated once, then that value should be used.
     # Otherwise, it will be generated and saved for the next call.        
     try:
         return self._constant_bias
     except AttributeError:          
          # The original code used a non-random null-shift.  This could be acceptable
          # when simulating a single vehicle, but is unrealistic for an entire
          # community, so the null-shift is now random.
         accel_null = self._sqd['sigma_n_f'] * sp.randn(3)
         gyro_null  = self._sqd['sigma_n_g'] * sp.randn(3)
         
         self._constant_bias =  np.hstack((gyro_null, accel_null)) 
         return self._constant_bias
Example #7
    def getPL(self,r,RSSStd):
        """ Get Power Level from a given distance

        Parameters
        ----------

        r : range (distance)
        RSSStd : standard deviation of the received signal strength (dB)

        Examples
        --------

        >>> M = PLSmodel(f=0.3,rssnp=2.64,d0=1,sigrss=3,method='mode')
        >>> PL =  M.getPL(16,1)

        """

        if self.method =='OneSlope':
            PL=self.OneSlope(r)

        elif self.method == 'mode' or self.method == 'median' or self.method == 'mean':
            PLmean          = self.getPLmean(r)
            try:
                shPLmean        = np.shape(PLmean)
                Xrand           = RSSStd*sp.randn(shPLmean[0])
            except IndexError:  # PLmean is a scalar, so shape () has no first entry
                Xrand           = RSSStd*sp.randn()
            PL        = PLmean+Xrand

        else:
            raise NameError('unknown pathloss method: %s' % self.method)

        return(PL)
Example #8
def gendat(TWO_KERNEL,nInd,nSnp,nCovar,minMaf=0.05,maxMaf=0.4,minSigE2=0.5,maxSigE2=1,minSigG2=0.5,maxSigG2=1):
    '''
    Generate synthetic SNPs and phenotype.
    SNPs are iid, and there is no population structure.
    Phenotype y is generated from a LMM with SNPs in a PS kernel.

    Returns:
        covDat
        y
        psSnps
    '''

    if TWO_KERNEL:
        psSnps=gensnps(nInd,nSnp,minMaf,maxMaf)
        psK=psSnps.dot(psSnps.T)
        psK+=1e-5*sp.eye(nInd)
        psKchol=la.cholesky(psK)
    else:
        psSnps=None

    covDat=sp.random.uniform(0,1,(nInd,nCovar))
    covWeights=sp.random.uniform(-0.5,0.5,(nCovar,1))

    sigE2=sp.random.uniform(low=minSigE2,high=maxSigE2)
    sigG2=sp.random.uniform(low=minSigG2,high=maxSigG2)

    ##generate the phenotype using the background kernel and covariates
    if TWO_KERNEL:
        y_pop=sp.sqrt(sigG2)*psKchol.dot(sp.randn(nInd,1))
    else:
        y_pop=0
    y_noise=sp.randn(nInd,1)*sp.sqrt(sigE2)
    y=(covDat.dot(covWeights) + y_pop + y_noise).squeeze()
    return covDat, y, psSnps
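A hypothetical call, assuming the sp (scipy) and la (scipy.linalg) aliases the function relies on are imported; TWO_KERNEL=False sidesteps the gensnps() helper, which is not shown in this snippet:

covDat, y, psSnps = gendat(TWO_KERNEL=False, nInd=500, nSnp=0, nCovar=3)
# covDat: (500, 3) uniform covariates; y: length-500 phenotype; psSnps: None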
Example #9
def fitPairwiseModel(Y,XX=None,S_XX=None,U_XX=None,verbose=False):
    N,P = Y.shape
    """ initilizes parameters """
    RV = fitSingleTraitModel(Y,XX=XX,S_XX=S_XX,U_XX=U_XX,verbose=verbose)
    Cg = covariance.freeform(2)
    Cn = covariance.freeform(2)
    gp = gp2kronSum(mean(Y[:,0:2]),Cg,Cn,XX=XX,S_XX=S_XX,U_XX=U_XX)
    conv2 = SP.ones((P,P),dtype=bool)
    rho_g = SP.ones((P,P))
    rho_n = SP.ones((P,P))
    for p1 in range(P):
        for p2 in range(p1):
            if verbose:
                print '.. fitting correlation (%d,%d)'%(p1,p2)
            gp.setY(Y[:,[p1,p2]])
            Cg_params0 = SP.array([SP.sqrt(RV['varST'][p1,0]),1e-6*SP.randn(),SP.sqrt(RV['varST'][p2,0])])
            Cn_params0 = SP.array([SP.sqrt(RV['varST'][p1,1]),1e-6*SP.randn(),SP.sqrt(RV['varST'][p2,1])])
            params0 = {'Cg':Cg_params0,'Cn':Cn_params0}
            conv2[p1,p2],info = OPT.opt_hyper(gp,params0,factr=1e3)
            rho_g[p1,p2] = Cg.K()[0,1]/SP.sqrt(Cg.K().diagonal().prod())
            rho_n[p1,p2] = Cn.K()[0,1]/SP.sqrt(Cn.K().diagonal().prod())
            conv2[p2,p1] = conv2[p1,p2]; rho_g[p2,p1] = rho_g[p1,p2]; rho_n[p2,p1] = rho_n[p1,p2]
    RV['Cg0'] = rho_g*SP.dot(SP.sqrt(RV['varST'][:,0:1]),SP.sqrt(RV['varST'][:,0:1].T))
    RV['Cn0'] = rho_n*SP.dot(SP.sqrt(RV['varST'][:,1:2]),SP.sqrt(RV['varST'][:,1:2].T))
    RV['conv2'] = conv2
    #3. regularizes covariance matrices
    offset_g = abs(SP.minimum(LA.eigh(RV['Cg0'])[0].min(),0))+1e-4
    offset_n = abs(SP.minimum(LA.eigh(RV['Cn0'])[0].min(),0))+1e-4
    RV['Cg0_reg'] = RV['Cg0']+offset_g*SP.eye(P)
    RV['Cn0_reg'] = RV['Cn0']+offset_n*SP.eye(P)
    RV['params0_Cg']=LA.cholesky(RV['Cg0_reg'])[SP.tril_indices(P)]
    RV['params0_Cn']=LA.cholesky(RV['Cn0_reg'])[SP.tril_indices(P)]
    return RV
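The rho_g and rho_n lines above convert a 2x2 covariance into a correlation by dividing the off-diagonal entry by the geometric mean of the diagonal; a one-line standalone check of that step:

import numpy as np

K = np.array([[2.0, 0.6], [0.6, 1.0]])        # toy 2x2 covariance
rho = K[0, 1] / np.sqrt(K.diagonal().prod())  # correlation, about 0.42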
Example #10
def fit_krr_dskl_rks_result(X,Y,Xtest,Ytest,its=100,eta=.1,C=.001,nPredSamples=30,nExpandSamples=10, kernel=(GaussianKernel,(1.))):
    # random gaussian for rks
    Zrks = sp.randn(len(Y),X.shape[0]) / (kernel[1]**2)
    Wrks = sp.randn(len(Y))
    for it in range(1,its+1):
        Wrks = step_dskl_rks_krr(X,Y,Wrks,Zrks,eta/it,C,nPredSamples,nExpandSamples)
    return predict_krr_rks(Xtest,Wrks,Zrks) 
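Zrks and Wrks follow the random kitchen sinks recipe (random Fourier features): Gaussian random projections scaled by the kernel bandwidth approximate a Gaussian kernel in expectation. The step_dskl_rks_krr and predict_krr_rks helpers are not shown here; a minimal standalone sketch of the feature map itself, under those assumptions:

import numpy as np

def rks_features(X, n_features=200, bandwidth=1.0, seed=0):
    # Random Fourier features approximating a Gaussian kernel of the
    # given bandwidth (Rahimi & Recht's random kitchen sinks).
    rng = np.random.default_rng(seed)
    W = rng.standard_normal((X.shape[1], n_features)) / bandwidth
    b = rng.uniform(0, 2 * np.pi, n_features)
    return np.sqrt(2.0 / n_features) * np.cos(X.dot(W) + b)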
Example #11
 def _sim_from(self, set_covar='block', seed=None, qq=False):
     ##1. region term
     if set_covar=='block':
         Cr = self.block['Cr']
         Cg = self.block['Cg']
         Cn = self.block['Cn']
     if set_covar=='rank1':
         Cr = self.lr['Cr']
         Cg = self.lr['Cg']
         Cn = self.lr['Cn']
     Lc = msqrt(Cr)
     U, Sh, V = nla.svd(self.Xr, full_matrices=0)
     Lr = sp.zeros((self.Y.shape[0], self.Y.shape[0]))
     Lr[:, :Sh.shape[0]] = U * Sh[sp.newaxis, :]
     Z = sp.randn(*self.Y.shape)
     Yr = sp.dot(Lr, sp.dot(Z, Lc.T))
     ##2. bg term
     Lc = msqrt(Cg)
     Lr = self.XXh
     Z = sp.randn(*self.Y.shape)
     Yg = sp.dot(Lr, sp.dot(Z, Lc.T))
     # noise terms
     Lc = msqrt(Cn)
     Z = sp.randn(*self.Y.shape)
     Yn = sp.dot(Z, Lc.T)
     # normalize
     Y = Yr + Yg + Yn
     if qq:
         Y = gaussianize(Y)
         Y-= Y.mean(0)
         Y/= Y.std(0)
     return Y
Example #12
    def setUp(self):
        np.random.seed(1)

        # generate data
        N = 400
        s_x = 0.05
        s_y = 0.1
        X = (sp.linspace(0, 2, N) + s_x * sp.randn(N))[:, sp.newaxis]
        Y = sp.sin(X) + s_y * sp.randn(N, 1)
        Y -= Y.mean(0)
        Y /= Y.std(0)

        Xstar = sp.linspace(0, 2, 1000)[:, sp.newaxis]

        # define mean term
        F = 1.0 * (sp.rand(N, 2) < 0.2)
        mean = lin_mean(Y, F)

        # define covariance matrices
        covar1 = SQExpCov(X, Xstar=Xstar)
        covar2 = FixedCov(sp.eye(N))
        covar = SumCov(covar1, covar2)

        # define gp
        self._gp = GP(covar=covar, mean=mean)
Example #13
def writeBackRepsAddNoise(wc1,wc2,y1,y2,geneName,n_reps):
    for i in range(n_reps):
        c1 = [geneName]
        c2 = [geneName]
        c1.extend(y1+SP.randn(y1.shape[0])*.1)
        c2.extend(y2+SP.randn(y2.shape[0])*.1)
        wc1.writerow(c1)
        wc2.writerow(c2)
Example #14
def awgn(sig,snrdb,sigpower=0):
    """Additive white gaussian noise.  Assumes signal power is 0 dBW"""
    if sp.iscomplexobj(sig):
        noise = (sp.randn(*sig.shape) + 1j*sp.randn(*sig.shape))/math.sqrt(2)
    else:
        noise = sp.randn(*sig.shape)
    noisev = 10**((sigpower - snrdb)/20)
    return sig + noise*noisev
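A short usage sketch for awgn(), assuming the math import and an sp alias that still carries randn and iscomplexobj (as older SciPy releases did):

import math
import numpy as np
import scipy as sp  # sp.randn/sp.iscomplexobj exist in older SciPy only

t = np.linspace(0.0, 1.0, 1000)
clean = np.exp(2j * np.pi * 50 * t)  # unit-power complex tone (~0 dBW)
noisy = awgn(clean, snrdb=20)        # adds noise for roughly 20 dB SNR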
Example #15
 def ts(self, n):
     R = np.empty(n)
     X = np.empty(n)
     X[0] = 1
     for t in range(n-1):
         R[t] = np.sqrt(X[t]) * randn(1)
         X[t+1] = self.a0 + self.b * X[t] + self.a1 * R[t]**2
     R[n-1] = np.sqrt(X[n-1]) * randn(1)
     return R
Example #16
 def sim_bivariate(self, N):
     R = np.empty(N)
     X = np.empty(N)
     X[0] = 1
     for t in range(N-1):
         R[t] = sqrt(X[t]) * randn(1)
         X[t+1] = self.a0 + self.b * X[t] + self.a1 * R[t]**2
     R[N-1] = sqrt(X[N-1]) * randn(1)
     return R, X
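Both ts() and sim_bivariate() iterate the same ARCH-style recursion, R_t = sqrt(X_t) * eps_t with X_{t+1} = a0 + b X_t + a1 R_t**2. A standalone version with hypothetical parameter values (a0, a1 and b are attributes of the source classes, not given here):

import numpy as np
from numpy.random import randn

a0, a1, b, n = 0.1, 0.2, 0.5, 500  # hypothetical parameters
R, X = np.empty(n), np.empty(n)
X[0] = 1.0
for t in range(n - 1):
    R[t] = np.sqrt(X[t]) * randn()             # return with volatility sqrt(X_t)
    X[t + 1] = a0 + b * X[t] + a1 * R[t] ** 2  # volatility recursion
R[n - 1] = np.sqrt(X[n - 1]) * randn()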
Example #17
 def simulate_pheno(self):
     Yc = sp.dot(self.mean.F[0], sp.dot(self.mean.B[0], self.mean.A[0].T))
     Z = sp.randn(self.covar.G.shape[1], self.covar.Cr.X.shape[1])
     Yr = sp.dot(self.covar.G, sp.dot(Z, self.covar.Cr.X.T))
     _S, _U = LA.eigh(self.covar.Cn.K()); _S[_S<0] = 0
     Cn_h = _U*_S**0.5
     Yn = sp.dot(sp.randn(*self.mean.Y.shape), Cn_h.T)
     RV = Yc+Yr+Yn
     return RV
Example #18
	def _initParams_random(self):
		""" 
		initialize the gp parameters randomly
		"""
		# gp hyper params
		params = limix.CGPHyperParams()
		if self.interaction:	params['covar'] = SP.concatenate([SP.randn(self.N*self.k+1),SP.ones(1),SP.randn(1)])
		else:					params['covar'] = SP.randn(self.N*self.k+1)
		params['lik'] = SP.randn(1)
		return params
Example #19
 def _additionalInit(self):
     phi_size = self.num_actions * self.num_features
     if self.randomInit:
         self._A = randn(phi_size, phi_size) / 100.
         self._b = randn(phi_size) / 100.
     else:
         self._A = zeros((phi_size, phi_size))
         self._b = zeros(phi_size)          
     self._untouched = ones(phi_size, dtype=bool)
     self._count = 0
Example #20
Pcross = sp.zeros(steps)  #
RhoCross = sp.zeros(steps)  #
Scross = sp.zeros(steps)  #
K0a = sp.zeros(steps)  #Tracking isentrope parameters
K1a = sp.zeros(steps)
K2a = sp.zeros(steps)
gamma_a = sp.zeros(steps)
gamma_index = sp.zeros(steps)
qa = sp.zeros(steps)
cvc_mc = sp.zeros(size)
#################################################################
# begin monte carlo, only one, encompasses all calculations.
j = 0
while j < steps:
    #Calculate perturbations for everything here.
    K0s = K0 + K0e * sp.randn()
    K0a[j] = K0s
    K1s = K1 + K1e * sp.randn()
    K1a[j] = K1s
    K2s = K2 + K2e * sp.randn()
    K2a[j] = K2s
    gamma_i = gamma0 + gamma0e * sp.randn()
    gamma_a[j] = gamma_i
    rho_gamma = gamma_rho + gamma_rhoe * sp.randn()
    q = q0 + q0e * sp.randn()
    qa[j] = q
    q2_mc = q2 + q2e * sp.randn()
    #rho_init_l=rho0l+rho0le*sp.randn()
    rho_init_l = rho0l + 11 * sp.randn()  #For asimow version
    rho_init_s = rho0s + rho0se * sp.randn()
    dSm = dS0 + dS0e * sp.randn()
Example #21
          val_obj)
    print('Newton-CG: The final relative duality gap: %s \n' % gap)
    print('Newton-CG: The rank of the Optimal Solution - tau*I: %s \n' %
          rank_x)
    print('Newton-CG: computing time for computing preconditioners: %s \n' %
          prec_time)
    print(
        'Newton-CG: computing time for linear system solving (cgs time): %s \n'
        % pcg_time)
    print(
        'Newton-CG: computing time for eigenvalue decompositions: =============== %s \n'
        % eig_time)
    print(
        'Newton-CG: computing time used for equal weight calibration ============ %s \n'
        % time_used)

    return x_result, y


# end of the main function

# test
n = 3000
data_g_test = scipy.randn(n, n)
data_g_test = (data_g_test + data_g_test.transpose()) / 2.0
data_g_test = data_g_test - np.diag(np.diag(data_g_test)) + np.eye(n)
b = np.ones((n, 1))
tau = 0
tol = 1.0e-6
[x_test_result, y_test_result] = my_correlationmatrix(data_g_test, b, tau, tol)
Example #22
# Grid of values for wealth over which function will be approximated
gridmax, gridsize = 5, 300
dx=0.01          # Gridcell size for pdf
wgrid=np.arange(0.1,gridmax,dx)
Ws=wgrid

# Lifespan is TT+1, i.e. agent dies in period T+1
TT=T

# auxiliary parameters and functions
theta1=1-theta
rho=beta

# Assume income process is U shaped + log-normal shock with mean 1 and std 0.1
sigman=0.18               # Std of log-income
yt = exp(sigman*randn(N,TT))                   # Draws of shock
if yp1=='':
    yp = .6-constant*1.5*(np.arange(0,1,1/(TT+2))-.5)**2
else:
    yp = 1-constant*(np.arange(0,1,1/(TT+2))-.5)**2



# Parameters of the linear consumption function
agrid=np.arange(0.01,1.5,0.01)       # Intercept
bgrid=np.arange(0.01,1,0.01)       # MPC: Marginal propensity to consume out of wealth

# Parameters for varying the algorithm
Newt=0                      # Flag for using full Hessian
fr=1                        # Fractional step size
gain=0                      # Gain parameter
Example #23
        )  # Can use the Chi^2 CDF/SF to evaluate the scaled Chi^2 by rescaling the input.
        pv[i0] = 1.0
        return (pv, mixture, scale, dof, i0)


if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO)
    logging.info("generate chi-2 distributed values")
    scale = 3
    dof = 2
    mixture = 0.5
    ntests = 10000
    lrt = sp.zeros((ntests))
    lrttest = sp.zeros((ntests))
    for i in range(dof):
        x = sp.randn(ntests)
        xtest = sp.randn(ntests)
        lrt += scale * (x * x)
        lrttest += scale * (xtest * xtest)

    lrt[sp.random.permutation(ntests)[0:int(sp.ceil(ntests * mixture))]] = 0.0
    lrttest[sp.random.permutation(ntests)[0:int(sp.ceil(ntests * mixture))]] = 0.0

    qmax = 0.2
    logging.info(("create the distribution object, with qmax = %.4f" % qmax))
    mix = chi2mixture(lrt=lrt, a2=None, qmax=0.2)  #object constructor

    logging.info(
        "fit the parameter of the object by log-Pvalue quantile regression")
    import time
    t0 = time.time()
Example #24
from scipy.optimize import curve_fit
import matplotlib.pyplot as plt
import time

tic = time.time()

'''function to optimize'''
def func(u, K, tau, y0):
	sys = signal.lti(K, [tau, 1])
	y = sys.output(u, t, y0)
	return y[1]

'''input square signal'''
global t
t = linspace(0, 20, 1000)
Umes = -0.5*(signal.square(2*pi*0.1*t)+ones(len(t)))+ones(len(t))+randn(len(t))/50

'''noisy output signal'''
p = [1, 1, 0]
Ymes = func(Umes, *p)+randn(len(Umes))/50

'''input and output signals plot'''
plt.plot(t, Umes, label = 'Umes')
plt.plot(t, Ymes, label = 'Ymes')


'''optimization with non-linear least squares method'''
popt, cov = curve_fit(func, Umes, Ymes)
print(p)
print(popt)
Example #25
Centre, Var = spcv.kmeans(X, Num_of_clusters)
id,dist = spcv.vq(X,Centre)

print id, dist

#Sample data creation

#number of points
n = 50
t = linspace(-5, 5, n)
#parameters
a = 0.8
b = -4
x = nppp.polyval(t,[a, b])
#add some noise
xn= x+randn(n)
(ar,br) = nppp.polyfit(t,xn,1)
xr = nppp.polyval(t,[ar,br])

#compute the mean square error
err = sqrt(sum((xr-xn)**2)/n)
print('Linear regression using polyfit')
print('parameters: a=%.2f b=%.2f \nregression: a=%.2f b=%.2f, ms error= %.3f' % (a,b,ar,br,err))
print('-----------------------------------------------------')

#Linear regression using stats.linregress
(a_s,b_s,r,tt,stderr) = sps.linregress(t,xn)
print('Linear regression using stats.linregress')
print('parameters: a=%.2f b=%.2f \nregression: a=%.2f b=%.2f, std error= %.3f' % (a,b,a_s,b_s,stderr))

#matplotlib ploting
Example #26
 def __init__(self, net, prec=1.):
     CNodeWsparse.__init__(self, net, prec=prec)
     #variable initialisation in CNodeWsparse
     self.sigma2 = (1.0 / prec) * SP.ones((net._D, net.components))
     self.E1 = SP.randn(net._D, net.components)
     self.E2diag = SP.zeros((net._D, net.components))
Example #27
import backprop_network as bp

#Create two template matrices with dimensions dim*dim
dim=4
template1=np.array([[1,0,0,0],[0,1,0,0],[0,0,1,0],[0,0,0,1]])
template2=np.array([[1,0,0,0],[1,0,0,0],[1,0,0,0],[1,0,0,0]])

#Flatten each template into a 1D array
pattern1 = template1.reshape((1,dim*dim))
pattern2 = template2.reshape((1,dim*dim))

#Create num training instances of each pattern
num_instances = 20
noise_level=.0
pattern1_data = np.repeat(pattern1,num_instances,0)
pattern1_noise = randn(num_instances, dim*dim)
pattern1_data = pattern1_data + noise_level*pattern1_noise
pattern2_data = np.repeat(pattern2,num_instances,0)
pattern2_noise = randn(num_instances, dim*dim)
pattern2_data = pattern2_data + noise_level*pattern2_noise

#Normalise the data so it has a mean of zero. (This is an important prerequisite for PCA!)
mu1 = np.mean(pattern1_data)
pattern1_data = np.subtract(pattern1_data, mu1)
mu2 = np.mean(pattern2_data)
pattern2_data = np.subtract(pattern2_data, mu2)

#Close all currently open figures
plt.close('all')
#train
def show_training_images():
Example #28
    def step(self, niter):
        """ xNES """
        f = self.f
        mu, sigma, bmat = self.mu, self.sigma, self.bmat
        eta_mu, eta_sigma, eta_bmat = self.eta_mu, self.eta_sigma, self.eta_bmat
        npop = self.npop
        dim = self.dim
        sigma_old = self.sigma_old

        eyemat = eye(dim)

        with joblib.Parallel(n_jobs=self.n_jobs) as parallel:

            for i in range(niter):
                s_try = randn(npop, dim)
                z_try = mu + sigma * dot(s_try, bmat)  # broadcast

                f_try = parallel(joblib.delayed(f)(z) for z in z_try)
                f_try = asarray(f_try)

                # save if best
                fitness = mean(f_try)
                if fitness - 1e-8 > self.fitness_best:
                    self.fitness_best = fitness
                    self.mu_best = mu.copy()
                    self.counter = 0
                else:
                    self.counter += 1
                if self.counter > self.patience:
                    self.done = True
                    return

                isort = argsort(f_try)
                f_try = f_try[isort]
                s_try = s_try[isort]
                z_try = z_try[isort]

                u_try = self.utilities if self.use_fshape else f_try

                if self.use_adasam and sigma_old is not None:  # sigma_old must be available
                    eta_sigma = self.adasam(eta_sigma, mu, sigma, bmat,
                                            sigma_old, z_try)

                dj_delta = dot(u_try, s_try)
                dj_mmat = dot(s_try.T, s_try *
                              u_try.reshape(npop, 1)) - sum(u_try) * eyemat
                dj_sigma = trace(dj_mmat) * (1.0 / dim)
                dj_bmat = dj_mmat - dj_sigma * eyemat

                sigma_old = sigma

                # update
                mu += eta_mu * sigma * dot(bmat, dj_delta)
                sigma *= exp(0.5 * eta_sigma * dj_sigma)
                bmat = dot(bmat, expm(0.5 * eta_bmat * dj_bmat))

                # logging
                self.history['fitness'].append(fitness)
                self.history['sigma'].append(sigma)
                self.history['eta_sigma'].append(eta_sigma)

        # keep last results
        self.mu, self.sigma, self.bmat = mu, sigma, bmat
        self.eta_sigma = eta_sigma
        self.sigma_old = sigma_old
Example #29
def PR_MultiSine_adapt(
    f1,
    Nperiods,
    Nsamples,
    Nf=8,
    fs_min=0,
    fs_max=1e9,
    frange=10,
    log=True,
    phases=None,
    sample_inkr=1,
):
    """
    Returns an additive normalized Multisine time series. \n
    f1 = start frequency (may be adapted) \n
    Nperiods = number of periods of f1 (may be increased) \n
    Nsamples = Minimum Number of samples  \n
    Nf = number of frequencies in multi frequency mix \n
    fs_min = minimum sample rate of used device (default 0) \n
    fs_max = maximum sample rate of used device (default 0) \n
    frange = range of frequency as a factor relative to f1 (default 10 = decade) \n
    log = boolean for logarithmic (True, default) or linear (False) frequency scale \n
    phases = float array of given phases for the frequencies (default=None=random) \n
    deg= boolean for return phases in deg (True) or rad (False) \n
    sample_inkr = minimum block of samples to add to a waveform
    \n
    returns: freq,phase,fs,ti,multi \n
    freq= array of frequencies \n
    phase=used phases in deg or rad \n
    fs=sample rate \n
    ti=timestamps \n
    multi=array of time series values \n
    """
    if (Nsamples // sample_inkr * sample_inkr !=
            Nsamples):  # check multiplicity of sample_inkr
        Nsamples = (Nsamples // sample_inkr +
                    1) * sample_inkr  # round to next higher multiple

    T0 = Nperiods / f1  # given duration
    fs0 = Nsamples / T0  # (implicitly) given sample rate

    if False:
        print("0 Nperiods: " + str(Nperiods))
        print("0 Nsamples: " + str(Nsamples))
        print("0 fs: " + str(fs0))
        print("0 T0: " + str(T0))
        print("0 f1: " + str(f1))

    fs = fs0
    if fs0 < fs_min:  # sample rate too low, then set to minimum
        fs = fs_min
        print("sample rate increased")
    elif fs0 > fs_max:  # sample rate too high, set to max-allowed and
        fs = fs_max
        Nperiods = sp.ceil(
            Nperiods * fs0 / fs_max
        )  # increase number of periods to get at least Nsamples samples
        T0 = Nperiods / f1
        print("sample rate reduced, Nperiods=" + str(Nperiods))

    Nsamples = T0 * fs
    if (Nsamples // sample_inkr * sample_inkr !=
            Nsamples):  # check multiplicity of sample_inkr
        Nsamples = (Nsamples // sample_inkr +
                    1) * sample_inkr  # round to next higher multiple

    T1 = Nsamples / fs  # adapt exact duration
    f1 = Nperiods / T1  # adapt f1 for complete cycles
    if False:
        print("Nperiods: " + str(Nperiods))
        print("Nsamples: " + str(Nsamples))
        print("fs: " + str(fs))
        print("T1: " + str(T1))
        print("f1: " + str(f1))

    f_res = 1 / T1  # frequency resolution
    # determine a series of frequencies (freq[])
    if log:
        fact = sp.power(frange, 1.0 / (Nf - 1))  # factor for logarithmic scale
        freq = f1 * sp.power(fact, sp.arange(Nf))
    else:
        step = (frange - 1) * f1 / (Nf - 1)
        freq = sp.arange(f1, frange * f1 + step, step)

    # auxiliary function to find the nearest available frequency
    def find_nearest(
        x, possible
    ):  # match the theoretical freqs to the possible periodic freqs
        idx = (sp.absolute(possible - x)).argmin()
        return possible[idx]

    fi_pos = sp.arange(f1, frange * f1 + f_res,
                       f_res)  # possible periodic frequencies
    f_real = []
    for f in freq:
        f_real.append(find_nearest(f, fi_pos))
    freq = sp.hstack(f_real)
    if True:
        print("freq: " + str(freq))

    if phases is None:  # generate random phases
        phase = sp.randn(Nf) * 2 * sp.pi  # random phase
    else:  # use given phases
        phase = phases

    return freq, phase, T1, fs
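A hypothetical call with made-up device limits, assuming the sp alias (scipy) used inside the function is in scope:

freq, phase, T1, fs = PR_MultiSine_adapt(
    f1=100.0, Nperiods=10, Nsamples=2000,
    Nf=8, fs_max=1e6, frange=10, log=True)
# freq: the 8 chosen frequencies; phase: random phases (rad);
# T1: adapted duration; fs: sample rate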
Example #30
def main():
    '''Define the main function. '''
    # Create a sine-wave
    t = np.arange(0, 10, 0.1)
    x = np.sin(t)

    # Save the data in a text-file, in column form
    # The formatting is a bit clumsy: data are by default row variables; so to
    # get a matrix, you stack the two rows above each other, and then transpose
    # the matrix
    outFile = 'test.txt'
    np.savetxt(outFile, np.vstack([t, x]).T)

    # Read the data into a different variable
    inData = np.loadtxt(outFile)
    t2 = inData[:, 0]  # Note that Python starts at "0"!
    x2 = inData[:, 1]

    # Plot the data, and wait for the user to click
    plt.show()
    plt.plot(t2, x2)
    plt.title('Hit any key to continue')
    plt.waitforbuttonpress()

    # Generate a noisy line
    t = np.arange(-100, 100)
    # use a Python "dictionary" for named variables
    par = {'offset': 100, 'slope': 0.5, 'noiseAmp': 4}
    x = par['offset'] + par['slope'] * t + par['noiseAmp'] * sp.randn(len(t))

    # Select "late" values, i.e. with t>10
    xHigh = x[t > 10]
    tHigh = t[t > 10]

    # Plot the "late" data
    plt.close()
    plt.plot(tHigh, xHigh)

    # Determine the best-fit line
    # To do so, you have to generate a matrix with "time" in the first
    # column, and a column of "1" in the second column:
    xMat = np.vstack((tHigh, np.ones(len(tHigh)))).T
    slope, intercept = np.linalg.lstsq(xMat, xHigh)[0]

    # Show and plot the fit, and save it to a PNG-file with a medium resolution.
    # The "modern" way of Python-formatting is used
    plt.hold(True)
    plt.plot(tHigh, intercept + slope * tHigh, 'r')
    plt.title('Hit any key to continue')
    plt.savefig('linefit.png', dpi=200)
    plt.waitforbuttonpress()
    plt.close()
    print(('Fit line: intercept = {0:5.3f}, and slope = {1:5.3f}'.format(
        intercept, slope)))
    #raw_input('Thanks for using programs by Thomas!')

    # If you want to know confidence intervals, it is best to switch to "pandas".
    # Note that this is an advanced topic, and requires new data structures
    # such as "DataFrames" and "ordinary-least-squares" or "ols-models".
    import pandas
    myDict = {'x': tHigh, 'y': xHigh}
    df = pandas.DataFrame(myDict)
    model = pandas.ols(y=df['y'], x=df['x'])
    print(model)
Example #31
# Linear algebra
x = np.array([[1., 2., 3.], [4., 5., 6.]])
y = np.array([[6., 23.], [-1, 7], [8, 9]])
print x
print y
print x.dot(y)  # equivalent to np.dot(x, y)

print np.dot(x, np.ones(3))
print np.ones(3)
print np.random.seed(12345)

from numpy.linalg import inv, qr
from scipy import randn

print 'x = randn(5,5)  randomly generate a 5x5 matrix'
x = randn(5, 5)
print x

print 'mat = x.T.dot(x)  compute the matrix product'
mat = x.T.dot(x)
print mat

print 'inv(mat)  inv computes the inverse of a square matrix'
inv(mat)
print inv(mat)

print 'mat.dot(inv(mat))'
mat.dot(inv(mat))
print mat.dot(inv(mat))

print 'qr(mat)  compute the QR decomposition'
Example #32
 def __init__(self, *args, **kwargs):
     MultiModalFunction.__init__(self, *args, **kwargs)
     self._signs = sign(randn(self.xdim))
     self._diags = generateDiags(10, self.xdim)
     self.xopt = self._k2 * self._signs
Example #33
# -*- encoding:UTF-8 -*-
import scipy
import scipy.cluster.hierarchy as sch
from scipy.cluster.vq import vq, kmeans, whiten
import numpy as np
import matplotlib.pylab as plt

points = scipy.randn(20, 4)
print(points)
data = whiten(points)
# print(data)

centroid = kmeans(data, 3)[0]
print(centroid)

label = vq(np.array([[1, 1, 1, 1]]), centroid)

print(label)
Example #34
                var_block = sp.trace(Cr_block) * trRr / float(self.Y.size - 1)
                var_rank1 = sp.trace(Cr_rank1) * trRr / float(self.Y.size - 1)
                RV['var_r'] = sp.array(
                    [var_block, var_rank1 - var_block, var_r - var_rank1])
        return RV


if __name__ == '__main__':

    if 1:

        N = 1000
        P = 2
        S = 20
        Xr = 1. * (sp.rand(N, S) < 0.2)
        Y = sp.randn(N, P)
        X = sp.randn(N, 100)
        Rg = sp.dot(X, X.T) / float(X.shape[1])
        Rg += 1e-4 * sp.eye(X.shape[0])
        Sg, Ug = la.eigh(Rg)

        pdb.set_trace()

        t0 = time.time()
        mvset = MvSetTestFull(Y=Y, Xr=Xr, Sg=Sg, Ug=Ug, factr=1e7)
        mvset.assoc()
        mvset.gxe()
        mvset.gxehet()
        print('.. permutations')
        mvset.assoc_null(n_nulls=2)
        print('.. bootstrap gxe')
Example #35
 def setRandomParams(self):
     """
     set random hyperparameters
     """
     params = SP.randn(self.getNumberParams())
     self.setParams(params)
Example #36
    def init(self,
             init_data,
             Pi=None,
             terms=None,
             noise='gauss',
             init_factors=None,
             unannotated_id="hidden",
             covariates=None,
             dropFactors=True):
        # initialize the model instance
        # AGaussNode is defined in ExpresisonNet
        # expr Y ~ N(\mu = expr, \sigma = 0)
        pattern_hidden = re.compile(unannotated_id + '\d')
        pattern_hiddenSparse = re.compile(unannotated_id + "\D*parse" + "\d")

        Ihidden = SP.array(
            [pattern_hidden.match(term) is not None for term in terms])
        IhiddenSparse = SP.array(
            [pattern_hiddenSparse.match(term) is not None for term in terms])

        self.terms = terms
        if not isinstance(init_data, AGaussNode):
            raise Exception("initialization is only possible from a GaussNode")
        self.Z = CNodeZ(node=init_data)

        #datanode hold the data
        self.dataNode = self.Z
        if self.noise == 'poisson':
            self.kappa = 1. / 4.0 + 0.17 * self.Z.E1.max(0)

        if self.noise == 'hurdle':
            self.meanX = self.Z.E1.copy()
            self.isExpressed = (self.Z.E1 > 0) * 1.
        self.numExpressed = SP.sum(self.Z.E1 > 0, 0)

        self.doUpdate = SP.ones((Pi.shape[1], )).astype("int")
        self.dropFactors = dropFactors

        #known covariates
        if init_factors is not None and 'Known' in init_factors:
            self.nKnown = init_factors['Known'].shape[1]
            self.Known = init_factors['Known']
            assert self.Known.shape[0] == self.Z.E1.shape[0]
            self.nHidden = self.components - self.nKnown
            if 'Intr' in init_factors:
                self.nKnown = init_factors['Known'].shape[1]
                self.Known = init_factors['Known']
                assert self.Known.shape[0] == self._N
                self.nHidden = self.components - self.nKnown
        elif not (covariates is None):
            self.nKnown = covariates.shape[1]
            #self.iKnown = SP.arange(covariates.shape[1])
            self.Known = covariates
            assert self.Known.shape[0] == self.Z.E1.shape[0]
            self.nHidden = self.components - self.nKnown
            #mean term/'bias'
            if terms[0] == 'bias':
                self.Known = SP.hstack(
                    (SP.ones((self.Z.E1.shape[0], 1)), self.Known))
                self.nKnown += 1
                self.nHidden = self.nHidden - 1
        #mean term/'bias'
        elif terms[0] == 'bias':
            self.Known = SP.ones(
                (self.Z.E1.shape[0], 1))  #make sure this was correct?
            self.nKnown = 1
            self.nHidden = self.components - self.nKnown
        else:
            self.nHidden = self.components
            self.nKnown = 0

        #set some attributes that we need frequently for the updates, including
        #the number and indices of hidden and sparse hidden terms

        if init_factors is not None and 'iLatent' in init_factors:
            self.iLatent = init_factors['iLatent']
            self.nLatent = len(init_factors['iLatent'])
        else:
            self.iLatent = SP.where(Ihidden == True)[0]
            self.nLatent = len(self.iLatent)

        if init_factors is not None and 'iLatentSparse' in init_factors:
            self.iLatentSparse = init_factors['iLatentSparse']
            self.nLatentSparse = len(init_factors['iLatentSparse'])
        else:
            self.iLatentSparse = SP.where(IhiddenSparse == True)[0]
            self.nLatentSparse = len(self.iLatentSparse)

        if init_factors is not None and 'onF' in init_factors:
            self.onF = init_factors['onF']
        else:
            self.onF = self.Z.E1.shape[0] / 10000.  #self.nScale

        if init_factors is not None and 'initZ' in init_factors:
            self.initZ = init_factors['initZ']
        else:
            self.initZ = Pi.copy()
            self.initZ[self.initZ < .2] = 0.01

        self.nAnno = self.nHidden - self.nLatentSparse - self.nLatent
        #pdb.set_trace()
        #Pi is likelihood of link for genes x factors

        #self.Pi = Pi

        # set dimensionality of the data
        [self._N, self._D] = self.Z.E1.shape
        self.ZZ = SP.zeros((self._D, ))
        for d in range(self._D):
            self.ZZ[d] = SP.sum(self.Z.E1[:, d] * self.Z.E1[:, d], 0)

        PiPriors = [[1., 1.], self.priors['PiDense']['priors'],
                    self.priors['PiSparse']['priors'], [1., 1.]]
        self.Pi = CNodePi(self, PiPriors, Pi)
        self.piInit = Pi.copy()

        self.nodes = {
            'S': CNodeSsparse(self),
            'Pi': self.Pi,
            'W': CNodeWsparseVEM(self),
            'Alpha': CNodeAlphasparse(self, self.priors['Alpha']['priors']),
            'Eps': CNodeEpsSparse(self, self.priors['Eps']['priors'])
        }
        for n in list(self.nodes.keys()):
            setattr(self, n, self.nodes[n])

        self.Non = (self.Pi.E1 > .5).sum(0)
        if self.Pi is not None:
            assert self.Pi.E1.shape == (self._D, self.components)

        #pca initialisation
        Ion = None
        if self.initType == 'pca':
            Ion = random.rand(self.Pi.E1.shape[0],
                              self.Pi.E1.shape[1]) < self.Pi.E1
            self.W.C[:, :, 0] = self.Pi.E1.copy()
            #self.W.C[:,:,0][self.W.C[:,:,0]<=.2] = .1
            #self.W.C[:,:,0][self.W.C[:,:,0]>=.8] = .9
            for k in range(self.components):
                sv = linalg.svd(self.Z.E1[:, Ion[:, k]], full_matrices=0)
                [s0,
                 w0] = [sv[0][:, 0:1],
                        S.dot(S.diag(sv[1]), sv[2]).T[:, 0:1]]
                v = s0.std(axis=0)
                s0 /= v
                w0 *= v
                self.S.E1[:, k] = s0.ravel()
                self.W.E1[Ion[:, k], k] = w0.ravel()
                self.W.E1[~Ion[:, k], k] *= self.sigmaOff
                self.S.diagSigmaS[:, k] = 1. / 2
        if self.initType == 'pcaRand':
            random.seed(222)
            if self.noise == 'hurdle':
                Zstd = self.Z.E1.copy()
                self.meanZ = Zstd.mean(0)
                Zstd -= Zstd.mean(0)
            elif self.noise == 'poisson':
                Zstd = SP.log2(self.Z.E1.astype('float64') + 1)
                Zstd -= Zstd.mean(0)
            else:
                Zstd = self.Z.E1
                #Zstd -= Zstd.mean(0)

            Ion = random.rand(self.Pi.E1.shape[0],
                              self.Pi.E1.shape[1]) < self.initZ
            self.W.C[:, :, 0] = self.initZ
            self.W.C[:, :, 0][self.W.C[:, :, 0] <= .1] = .1
            self.W.C[:, :, 0][self.W.C[:, :, 0] >= .9] = .9
            self.W.C[:, :, 1] = 1. - self.W.C[:, :, 0]

            for k in range(self.nHidden):
                k += self.nKnown
                if Ion[:, k].sum() > 5:
                    #pdb.set_trace()
                    if self.S.E1.shape[0] < 500:
                        pca = PCA(n_components=1)
                    else:
                        pca = PCA(n_components=1,
                                  iterated_power=2,
                                  svd_solver='randomized')
                    s0 = pca.fit_transform(Zstd[:, Ion[:, k]])
                    self.S.E1[:, k] = (s0[:, 0])
                    self.S.E1[:, k] = self.S.E1[:, k] / self.S.E1[:, k].std()

                else:
                    self.S.E1[:, k] = random.randn(self._N, )

                self.W.E1[:, k] = SP.sqrt(1. / self.components) * SP.randn(
                    self._D)
                self.S.diagSigmaS[:, k] = 1. / 2

            if self.nKnown > 0:
                for k in SP.arange(self.nKnown):
                    self.W.E1[:, k] = SP.sqrt(1. / self.components) * SP.randn(
                        self._D)
                    self.S.diagSigmaS[:, k] = 1. / 2
                self.S.E1[:, SP.arange(self.nKnown)] = self.Known
            if self.nLatent > 0:
                for iL in self.iLatent:
                    self.S.E1[:, iL] = random.randn(self._N, )

            # if self.nLatentSparse>0:
            #     for iL in self.iLatentSparse:
            #         #self.S.E1[:,iL] = random.randn(self._N,)
            #         pca = RandomizedPCA(n_components=iL-self.nLatent+1)
            #         s0 = pca.fit_transform(Zstd[:,Ion[:,iL]])
            #         self.S.E1[:,iL] =(s0[:,iL-self.nLatent])
            #         self.S.E1[:,iL] =  self.S.E1[:,iL]/self.S.E1[:,iL].std()

            if self.saveInit == True:
                self.initS = self.S.E1.copy()

        elif self.initType == 'greedy':
            self.S.E1 = random.randn(self._N, self.components)
            self.W.E1 = random.randn(self._D, self.components)
            Ion = (self.Pi.E1 > 0.5)
            self.W.E1[~Ion] *= self.sigmaOff
            for k in range(Ion.shape[1]):
                self.W.E1[Ion[:, k]] *= self.sigmaOn[k]

        elif self.initType == 'prior':
            Ion = random.rand(self.Pi.E1.shape[0],
                              self.Pi.E1.shape[1]) < self.Pi.E1
            self.W.E1[~Ion] *= self.sigmaOff
            for k in range(Ion.shape[1]):
                self.W.E1[Ion[:, k], k] *= self.sigmaOn[k]
        elif self.initType == 'on':
            for k in range(Ion.shape[1]):
                self.W.E1[:, k] *= self.sigmaOn[k]
        elif self.initType == 'random':
            for k in range(self.Pi.E1.shape[1]):
                self.S.diagSigmaS[:, k] = 1. / 2
                self.S.E1[:, k] = SP.randn(self._N)
            self.W.E1 = SP.randn(self._D, self.Pi.E1.shape[1])
            self.W.C[:, :, 0] = self.Pi.E1
            self.W.C[:, :, 0][self.W.C[:, :, 0] <= .2] = .1
            self.W.C[:, :, 0][self.W.C[:, :, 0] >= .8] = .9
            if self.nKnown > 0:
                for k in SP.arange(self.nKnown):
                    self.W.E1[:, k] = SP.sqrt(1. / self.components) * SP.randn(
                        self._D)
                    self.S.diagSigmaS[:, k] = 1. / 2
                self.S.E1[:, SP.arange(self.nKnown)] = self.Known
            if self.saveInit == True:
                self.initS = self.S.E1.copy()

        elif self.initType == 'data':
            assert ('S' in list(init_factors.keys()))
            assert ('W' in list(init_factors.keys()))
            #            Ion = init_factors['Ion']
            Sinit = init_factors['S']
            Winit = init_factors['W']
            self.W.C[:, :, 0] = self.Pi.E1
            self.W.C[:, :, 0][self.W.C[:, :, 0] <= .2] = .1
            self.W.C[:, :, 0][self.W.C[:, :, 0] >= .8] = .9
            for k in range(self.components):
                self.S.E1[:, k] = Sinit[:, k]
                self.W.E1[:, k] = Winit[:, k]
                self.S.diagSigmaS[:, k] = 1. / 2
Example #37
 def perturbParams(self, pertSize=1e-3):
     """
     slightly perturbs the values of the parameters
     """
     params = self.getParams()
     self.setParams(params + pertSize * SP.randn(params.shape[0]))
Example #38
 def compute_initial_figure(self):
     self.axes.plot(sp.randn(100))
     plt.show()
     pass
Example #39
import matplotlib.pyplot as plt
from mpl_format.axes.axis_utils import new_axes
from numpy import arange
from scipy import randn
from scipy.stats import norm

from probability.distributions.conjugate._inv_gamma_normal_conjugate import \
    _InvGammaNormalConjugate

x = arange(0, 20.01, 0.01)
mu, sigma = 2, 3
x_i = mu + sigma * randn(1000)
dist = _InvGammaNormalConjugate(alpha=1, beta=1, x=x_i, mu=2)


def plot_parameters():

    ax = new_axes()
    dist.prior().plot(x=x, color='r', ax=ax)
    dist.posterior().plot(x=x, color='g', ax=ax)
    ax.legend()
    plt.show()


def plot_predictions():

    ax = new_axes()
    predicted = dist.rvs(100000)
    ax.hist(predicted, bins=100, density=True, label='PPD samples')
    x_actual = arange(predicted.min(), predicted.max(), 0.01)
    actual = norm(loc=mu, scale=sigma).pdf(x_actual)
Example #40
 def testCoefficientsGenerateCorrectDataset(self):
     coeffs = randn(self.model.GetNumberOfPrincipalComponents())
     s = self.model.DrawSample(coeffs)
     computed_coeffs = self.model.ComputeCoefficientsForDataset(s)
     diff = (coeffs - computed_coeffs)[:-1] # we ignore the last coefficient
     self.assertTrue((diff < 1e-3).all()) #don't make the threshold too small, as we are dealing with floats
Example #41
 def generate(self):
     return scipy.randn() * self.std + self.mean
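generate() is the standard scale-and-shift of a standard-normal draw, producing one sample from N(mean, std**2); a standalone equivalent with hypothetical parameter values:

import numpy as np

mean, std = 2.0, 0.5                     # hypothetical parameters
sample = np.random.randn() * std + mean  # one draw from N(mean, std**2)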
Example #42
##################
config.read(os.path.expanduser(laccfg.cfg_fn))

mut_region_start = config.getint('Input', 'mut_region_start')
mut_region_length = config.getint('Input', 'mut_region_length')
seq_start = config.getint('Input', 'seqstart')
seq_end = config.getint('Input', 'seqend')
datab0 = config.get('Input', 'data_fn1')
datab01 = config.get('Input', 'data_fn2')
datab02 = config.get('Input', 'data_fn3')
datab03 = config.get('Input', 'data_fn4')
datab04 = config.get('Input', 'data_fn5')

# initialize random energy matrix
emat_0 = MCMC_utils_mscs.fix_matrix_gauge(
    sp.randn(4, mut_region_length + 21)
)  #RNAP emat from 0 to mut_region_length, CRP emat from mut_region_length to mut_region_length + 20, all other parameters in the last column
emat_0[:, -1] = np.array(
    [-5, -3, 2.5, 2.5]
)  #interaction energy, additive constant to CRP matrix, multiplicative constant for CRP, multiplicative constant for RNAP
# load in the data

numseq = [[] for i in range(0, 4)]
seq_mat = [[] for i in range(0, 4)]

sequences0 = readcollatedremoveoligos.collatedmat(datab0)
sequences1 = readcollatedremoveoligos.collatedmat(datab01)
sequences2 = readcollatedremoveoligos.collatedmat(datab02)
sequences3 = readcollatedremoveoligos.collatedmat(datab03)
sequences4 = readcollatedremoveoligos.collatedmat(datab04)
Example #43
        'http://fond-d-ecran-gratuit.org/photos/plage-mer-petites-vagues.jpg'
    ).resize(800, 600)
    kite_base = Image(
        'http://www.winds-up.com/images/annonces/7915_1.jpg').resize(
            50, 50).invert()
    disp = Display()

    i_loop = 0
    #pid = PID.PID(1, 1, 0.1)
    offset = sp.pi / 3 * 0
    kite_model = kiteModel()
    dX = 0 * kite_model.X
    while disp.isNotDone():
        setpoint = sp.pi / 1.7 * sp.sin(2 * sp.pi / 7 * time.time()) + offset
        i_loop = i_loop + 1
        order = 0 + 0 * sp.randn(
            1) / 5 + 2.0 * disp.mouseX / background.width - 1
        #error = X[0] -setpoint
        #order = sp.randn(1)/100 +  pid.computeCorrection(error, dX[0]/dt-0)
        #pid.incrementTime(error, dt)
        dt = 0.1
        kite_model.update(order, dt)
        print kite_model.X
        kite = kite_base.rotate(sp.rad2deg(kite_model.X[0]),
                                fixed=False).invert()
        #kite.save(disp)
        #background.blit(kite, (799,0)).save(disp)

        toDisplay = background.blit(
            kite.invert(),
            (max(
                -kite.width + 1,
Example #44

def send_pkg(sock, pkg):
    """send one SimPkg"""

    sock.sendall(pkg.packed_size)
    sock.sendall(pkg())


##---MAIN

if __name__ == '__main__':

    print
    print 'PACKAGE TEST - constructor with randn(4,4) and arange(10)'
    mypkg = SimPkg(SimPkg.T_UKN, 1337, 666, (N.randn(4, 4), N.arange(10)))
    print mypkg
    print
    print 'PACKAGE TEST - from package'
    newpkg = SimPkg.from_data(mypkg.payload)
    print newpkg
    print
    print 'mypkg == newpkg :', mypkg == newpkg
    print
    print ' --- %s \n --- \n equals \n --- %s \n --- \n %s' % (
        mypkg(), newpkg(), mypkg() == newpkg())
    print
    print 'unpack(\'!I\', newpkg.packed_size)[0] == len(newpkg) :', unpack(
        '!I', newpkg.packed_size)[0] == len(newpkg)
    print
    print 'PACKAGE TEST DONE'
Example #45
  upload, perform the operation, and then download again, so for
  small times the CPU performs better. On mine at n=1000 it's about
  the same, but for N=4000 the GPU is 10x faster.
- I'm not sure how you're supposed to indicate failure and return
  in a Weave inline function, so that's just ignored for the moment;
  if one of the functions returns an error the program will
  probably just crash.
'''

from numpy import *
from scipy import weave
from scipy import randn
import time

n = 4000
x = array(randn(n, n), order='F')
y = array(randn(n, n), order='F')
z = array(zeros((n, n)), order='F')

code = '''
cublasStatus status;
double *d_x = 0;
double *d_y = 0;
double *d_z = 0;
double alpha = 1.0;
double beta = 0.0;
int n2 = n*n; 

//fprintf (stderr, "fprintf working\\n");

status = cublasInit();
Example #46
import sys
sys.path.append(r'./..')
sys.path.append(r'./../../../pygp')
#sys.path.append(r'./../../build/debug.win32/interfaces/python')

import limix
print limix.__file__
import pygp.covar.linear as lin
import pygp.covar.se as se
import pygp.covar.gradcheck as GC
import pygp.covar.combinators as comb
import scipy as SP
import pdb

n_dimensions = 3
X = SP.randn(3, n_dimensions)

params = SP.zeros([0])

if 0:
    c1 = limix.CCovLinearISO()
    c2 = lin.LinearCFISO(n_dimensions=n_dimensions)

    c1.setX(X)

    K1 = c1.K()
    K2 = c2.K(params, X, X)

    dK1 = c1.Kgrad_param(0)
    dK2 = c2.Kgrad_theta(params, X, 0)
Example #47
    @cached('Yres')
    def Yres(self):
        return self.Y - self.predict_in_sample()

    @cached('Yres')
    def yres(self):
        r = vec(self.Yres())
        if self._miss:
            r = r[~self._veIok]
        return r


if __name__ == '__main__':

    # define phenotype
    N = 1000
    P = 4
    Y = sp.randn(N, P)

    # define fixed effects
    F = []
    A = []
    F.append(sp.randn(N, 3))
    F.append(sp.randn(N, 2))
    A.append(sp.eye(P))
    A.append(sp.ones((1, P)))

    pdb.set_trace()

    mean = MeanKronSum(Y, F, A)
Example #48
'''
barcodefn = config.get('Input','barcodefn')

barcode_dict = {}
reverse_dict = {}
csvfile = open(barcodefn,'r')
reader = csv.DictReader(csvfile)
for row in reader:
    barcode_dict[row['experiment_name']] = row['fwd_barcode']
    reverse_dict[row['experiment_name']] = row['rev_barcode']
'''

seq_mat_temp, batch_vec_temp = MCMC_utils.load_unique_seqs_batches(
    data_fn, seq_start + mut_region_start, mut_region_length)

emat_0 = MCMC_utils.fix_matrix_gauge(sp.randn(4, mut_region_length))

# shuffle the elements of seq_mat and batch_vec. This will prevent
# spuriously high mutual information values

index_shuf = range(len(batch_vec_temp))
sp.random.shuffle(index_shuf)
seq_mat = sp.zeros(
    [4, len(seq_mat_temp[0, :, 0]),
     len(seq_mat_temp[0, 0, :])], dtype='int')
batch_vec = sp.zeros_like(batch_vec_temp)
for i, i_s in enumerate(index_shuf):
    seq_mat[:, :, i] = seq_mat_temp[:, :, i_s]
    batch_vec[i] = batch_vec_temp[i_s]

Example #49
 def _produceSample(self):
     return randn(self.numParameters)