Example #1
def g_PY_Beta(Nu, Beta, Omega, Y, SigmaY, B0, Ngene, Ncell, Nsample):

    # Ngene by Nsample
    Exp = ExpQ(Nu, Beta, Omega, Ngene, Ncell, Nsample)
    Var = VarQ(Nu, Beta, Omega, Ngene, Ncell, Nsample)
        
    # Nsample by Ncell by Ngene
    g_Exp = g_Exp_Beta(Nu, Omega, Beta, B0, Ngene, Ncell, Nsample)
    g_Var = g_Var_Beta(Nu, Omega, Beta, B0, Ngene, Ncell, Nsample)
        
    # Nsample by Ncell by Ngene
    a = np.empty((Nsample, Ncell, Ngene))
    for c in range(Ncell):
        a[:,c,:] = np.divide((g_Var[:,c,:] * t(Exp) - 2 * g_Exp[:,c,:]*t(Var)),np.power(t(Exp),3))
    
    b = np.empty((Nsample, Ncell, Ngene))
    Var_Exp2 = np.divide(Var, 2*np.square(Exp))
    for s in range(Nsample):
        for c in range(Ncell):
            for g in range(Ngene):
                b[s,c,g] = - (Y[g,s] - np.log(Exp[g,s]) - Var_Exp2[g,s]) *(2*np.divide(g_Exp[s,c,g],Exp[g,s]) + a[s,c,g])

    grad_PY = np.zeros((Nsample, Ncell))
    for s in range(Nsample):
        for c in range(Ncell):
            grad_PY[s,c] = grad_PY[s,c] - np.sum(0.5 / np.square(SigmaY[:,s]) * (a[s,c,:] + b[s,c,:]))
    
    return grad_PY
Example #2
def conjugate_gradient(A, b, x0):
    """
    Applies conjugate gradient algorithm to find the minimum of function

    Algorithm from laboratory nr 8

    :param A: matrix 2x2
    :param b: matrix 1x2
    :param x0: starting point (eg [0,0])
    :return:  all the solutions, the last one being the most accurate
    """
    eps = 1e-10
    x = x0
    dk = rk = b - dot(A, x)
    solutions = []
    while norm(rk) > eps:
        rate = dot(t(rk), rk) / dot(dot(t(dk), A), dk)
        x = x + rate * dk
        rk_n = rk - rate * dot(A, dk)
        bk = dot(t(rk_n), rk_n) / dot(t(rk), rk)
        rk = rk_n
        dk = rk + bk * dk
        solutions.append(x)

    return solutions
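A minimal usage sketch for the snippet above, assuming `dot`, `norm` and `t` are numpy.dot, numpy.linalg.norm and numpy.transpose:

import numpy as np
from numpy import dot, transpose as t
from numpy.linalg import norm

A = np.array([[4.0, 1.0], [1.0, 3.0]])
b = np.array([1.0, 2.0])
solutions = conjugate_gradient(A, b, np.zeros(2))
print(solutions[-1])  # ~[0.0909, 0.6364], the solution of Ax = b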
Example #3
def LR_smooth(Y, X_):
    # ordinary least squares fit via the normal equations
    X = add_intercept(X_)
    theta = dot(np.linalg.inv(dot(t(X), X)), dot(t(X), Y))
    yhat = dot(X, theta)[:, np.newaxis]
    return yhat, theta
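A minimal usage sketch; `add_intercept` is not shown in the snippet, so the helper below (prepending a column of ones) is an assumption:

import numpy as np
from numpy import dot, transpose as t

def add_intercept(X_):
    # assumed helper: prepend a column of ones for the intercept term
    return np.c_[np.ones(len(X_)), X_]

X_ = np.linspace(0, 1, 5)
Y = 1 + 2 * X_
yhat, theta = LR_smooth(Y, X_)
print(theta)  # ~[1., 2.]: intercept and slope recovered exactly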
Example #4
    def CreateLmComp(self):
        '''
        Generate components for the linear model: HRF, whitening matrix,
        autocorrelation matrix and the expanded contrast matrix CX.
        '''

        # hrf
        self.canonical()

        # contrasts
        # expand contrasts to resolution
        self.CX = np.array(np.kron(self.C, np.eye(self.laghrf)))
        assert(self.CX.shape[0]==self.C.shape[0]*self.laghrf)
        assert(self.CX.shape[1]==self.n_stimuli*self.laghrf)

        # drift
        self.S = self.drift(np.arange(0, self.n_scans))  # [3 x n_scans]
        assert(self.S.shape == (3,self.n_scans))
        self.S = np.matrix(self.S)

        # square of the whitening matrix
        base = [1 + self.rho**2, -1 * self.rho] + [0] * (self.n_scans - 2)
        self.V2 = scipy.linalg.toeplitz(base)
        # set first and last to 1
        self.V2[0, 0] = 1
        self.V2[self.n_scans - 1, self.n_scans - 1] = 1
        self.V2 = np.matrix(self.V2)

        self.white = self.V2 - self.V2 * \
            t(self.S) * np.linalg.pinv(self.S *
                                       self.V2 * t(self.S)) * self.S * self.V2

        return self
Example #5
def LM(Y, Z, X, beta_0):
    Z_PI = Z @ PI_tilde(Y, Z, X, beta_0)
    P_zpi = np.outer(Z_PI, t(Z_PI)) * \
        ((t(Z_PI) @ Z_PI) ** (-1))
    LM = ((1 / sigma_eps_eps(Y, Z, X, beta_0)) *
          ((t(Y - X * beta_0)) @ P_zpi @ (Y - X * beta_0)))
    return LM
Example #6
def B_init(dim_hid,dim_vis):
	B = rd.uniform(size = (dim_hid+1,dim_vis+1))
	B[:,0] = 0
	B[0,:] = 0
	B[0,0] = 1
	B = t(t(B) / np.sum(B, axis = 1))
	return B
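A quick sanity check (a sketch, assuming `rd` is numpy.random and `t` is numpy.transpose): the final transpose-divide-transpose makes B row-stochastic, with row 0 pinned to a unit vector.

import numpy as np
import numpy.random as rd
from numpy import transpose as t

B = B_init(3, 4)                        # (dim_hid+1) x (dim_vis+1) = 4 x 5
print(np.allclose(B.sum(axis=1), 1.0))  # True: every row sums to 1
print(B[:, 0])                          # [1. 0. 0. 0.]: only B[0,0] is nonzero in column 0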
Example #7
def hessian(x, y, theta):  # construct the Hessian matrix
    H = 0
    for i in range(len(y)):
        H+=np.exp(-y[i]*np.dot(t(theta),x[i,]))*y[i]**2\
                 *np.outer(x[i],t(x[i]))*sigmoid(y[i],x[i,],theta)**2
    H /= len(y)
    return H
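A minimal usage sketch; `sigmoid` is not shown in the snippet, so the logistic form below is an assumption:

import numpy as np
from numpy import transpose as t

def sigmoid(y, x, theta):
    # assumed helper: logistic probability of label y given features x
    return 1.0 / (1.0 + np.exp(-y * np.dot(theta, x)))

x = np.array([[1.0, 2.0], [1.0, -1.0], [1.0, 0.5]])
y = np.array([1.0, -1.0, 1.0])
print(hessian(x, y, np.zeros(2)))  # 2x2 averaged outer-product matrix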
Example #8
def g_Exp_Beta_python(Nu, Omega, Beta, B0, Ngene, Ncell, Nsample):
    g_Exp = t(np.tile(np.exp(Nu + 0.5*np.square(Omega))[:,:,np.newaxis], [1,1,Nsample]), (2,1,0))\
                /np.tile(B0[:,np.newaxis, np.newaxis], [1,Ncell, Ngene])\
            - t(np.tile(np.sum( # Nsample Ncell Ngenes
                t(np.tile(np.exp(Nu + 0.5*np.square(Omega))[:,:,np.newaxis], [1,1,Nsample]), (2,1,0))\
                *np.tile((Beta/np.tile(np.square(B0)[:,np.newaxis], [1,Ncell]))[:,:, np.newaxis], [1,1, Ngene]), axis=1
                )[:,:,np.newaxis], [1,1,Ncell]), (0,2,1))
    return g_Exp
Example #9
def PI_tilde(Y, Z, X, beta_0):
    M_z = np.identity(n=N) - Z @ inv(t(Z) @ Z) @ t(Z)
    sigma_e_e = sigma_eps_eps(Y, Z, X, beta_0)
    sigma_e_V = sigma_eps_V(Y, Z, X, beta_0)
    eps = Y - X * beta_0
    ro_hat = sigma_e_V / sigma_e_e
    PI_tilde = inv(t(Z) @ Z) @ t(Z) @ (X - eps * ro_hat)
    return (PI_tilde)
Example #10
def eval_f(A, b, x):
    """

    :param A: function component
    :param b:  function component
    :param x: value to evaluate
    :return: f(x) where f is the function represented by A & b
    """
    return 0.5 * dot(dot(x, A), t(x)) - dot(x, t(b))
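A minimal usage sketch, reusing the 2x2 system from the conjugate_gradient example above; the minimizer of f is exactly the solution of Ax = b:

import numpy as np
from numpy import dot, transpose as t

A = np.array([[4.0, 1.0], [1.0, 3.0]])
b = np.array([1.0, 2.0])
print(eval_f(A, b, np.zeros(2)))             # 0.0 at the origin
print(eval_f(A, b, np.array([1/11, 7/11])))  # ~ -0.682 at the minimizer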
Example #11
def pearsonr(signals, nstim):
    cor = []
    varcov = np.zeros([len(signals), len(signals)])
    for sig1 in range(len(signals)):
        for sig2 in range(sig1, len(signals)):
            cors = np.diag(np.corrcoef(
                t(signals[sig1]), t(signals[sig2]))[nstim:, :nstim])
            varcov[sig1, sig2] = np.mean(cors)
            varcov[sig2, sig1] = np.mean(cors)
    return varcov
Example #12
def LWR_smooth(Y, X, tau):  # locally weighted regression
    Y_hat = np.zeros(Y.shape)  # initialize predictions
    for i in range(len(Y)):
        # Gaussian weights centered on X[i]
        W = np.diag(np.exp(-(X[i] - X)**2 / 2 / tau**2))
        # reduce computes the triple dot products X'WX and X'WY
        Y_hat[i] = X[i] * reduce(dot, [t(X), W, X])**(-1) * reduce(
            dot, [t(X), W, Y])
    return Y_hat
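A minimal usage sketch, assuming 1-D inputs (each local fit then reduces to a scalar weighted least squares through the origin); note that in Python 3 `reduce` must be imported from functools:

import numpy as np
from functools import reduce
from numpy import dot, transpose as t

X = np.linspace(-1.0, 1.0, 21)
Y = np.sin(2 * X)
Y_hat = LWR_smooth(Y, X, tau=0.3)  # one smoothed value per input point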
Example #13
    def FdCalc(self, Design):
        W = np.matrix(Design['Z'])
        X = t(W) * self.white * W
        invM = scipy.linalg.pinv(X)
        CMC = np.matrix(self.C) * invM * np.matrix(t(self.C))
        if self.Aoptimality == True:
            Design["Fd"] = float(self.rc / np.matrix.trace(CMC))
        else:
            Design["Fd"] = float(np.linalg.det(CMC)**(-1 / self.rc))
        return Design
Example #15
def r_beta_0(Y, Z, X, beta_0):
    M_z = np.identity(n=N) - Z @ inv(t(Z) @ Z) @ t(Z)
    SIGMA_VV = 1 / (N - k) * t(X) @ M_z @ X
    SIGMA_VV_eps = SIGMA_VV - \
                    ((sigma_eps_V(Y, Z, X, beta_0) ** 2) / sigma_eps_eps(Y, Z, X, beta_0))
    PI_tld = PI_tilde(Y, Z, X, beta_0)
    Z_PI = Z @ PI_tld
    r_beta = (1 / SIGMA_VV_eps) * \
        t(Y - X * beta_0) * (t(Z_PI) @ Z_PI) @ (Y - X * beta_0)
    return r_beta
Example #16
def RsT(R, n, s):
    """

    :param R: 3 dimensional matrix
    :param n: integer
    :param s: integer
    :return: integer
    """
    B = []
    for k in range(0, (n - 2)):
        B.append(Beta(k + 1))
    # print(t(R[0][s,0:(n - 2 - s)]))
    rst = sum(t(t(R[0][s, 0:(n - 2 - s)]) * B[0:(n - 2 - s)]))
    return rst
Example #17
def compute_hydraulic_p(qc):
    # Arc flow rates
    q = q0 + dot(B, qc)
    # Arc head losses
    z = r * abs(q) * q
    # Node fluxes
    f = np.zeros(m)
    f[:mr] = dot(Ar, q)
    f[mr:] = fd
    # Node pressures
    p = np.zeros(m)
    p[:mr] = pr
    p[mr:] = -dot(t(AdI), (dot(t(Ar), pr) + z)[:md])

    return q, z, f, p
Example #18
    def FdCalc(self, Aoptimality=True):
        '''
        Compute detection power.

        :param Aoptimality: Kind of optimality to optimize: A- or D-optimality
        :type Aoptimality: boolean
        '''
        DES = {"order": self.order, "ITI": self.ITI}
        pickle.dump(DES, open("/home/jdurnez/SVDerrorCheck/design.p", "wb"))
        try:
            invM = scipy.linalg.inv(self.Z)
        except scipy.linalg.LinAlgError:
            try:
                invM = scipy.linalg.pinv(self.Z)
            except numpy.linalg.linalg.LinAlgError:
                invM = np.nan
        sys.exc_clear()  # NB: Python 2 only; removed in Python 3
        invM = np.array(invM)
        CMC = np.matrix(self.C) * invM * np.matrix(t(self.C))
        if Aoptimality == True:
            self.Fd = float(len(self.C) / np.matrix.trace(CMC))
        else:
            self.Fd = float(np.linalg.det(CMC)**(-1 / len(self.C)))
        self.Fd = self.Fd / self.experiment.FdMax
        return self
Example #19
def norm_rnd(sigma):
    h = chol(sigma)
    size = shape(sigma)
    rv = randn(size = (size[0],1))
    y = t(h) @ rv

    return y
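A minimal usage sketch. The snippet reads like MATLAB-style code: it assumes `chol` returns the upper-triangular factor (as scipy.linalg.cholesky does) and that `randn` accepts a `size` keyword (as numpy.random.standard_normal does); with numpy.linalg.cholesky, which returns the lower factor, the transpose would sit on the other side.

import numpy as np
from numpy import shape, transpose as t
from numpy.random import standard_normal as randn
from scipy.linalg import cholesky as chol

sigma = np.array([[2.0, 0.5], [0.5, 1.0]])
draws = np.hstack([norm_rnd(sigma) for _ in range(10000)])
print(np.cov(draws))  # ~ sigma: each column of draws is one N(0, sigma) sample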
Example #20
    def FeCalc(self, Aoptimality=True):
        '''
        Compute estimation efficiency.

        :param Aoptimality: Kind of optimality to optimize, A- or D-optimality
        :type Aoptimality: boolean
        '''
        try:
            invM = scipy.linalg.inv(self.X)
        except scipy.linalg.LinAlgError:
            try:
                invM = scipy.linalg.pinv(self.X)
            except numpy.linalg.linalg.LinAlgError:
                invM = np.nan
        sys.exc_clear()  # NB: Python 2 only; removed in Python 3
        print("step1")
        invM = np.array(invM)
        print("step2")
        st1 = np.dot(self.CX, invM)
        print("step3")
        CMC = np.dot(st1, t(self.CX))
        if Aoptimality == True:
            self.Fe = float(self.CX.shape[0] / np.matrix.trace(CMC))
        else:
            self.Fe = float(np.linalg.det(CMC)**(-1 / len(self.C)))
        self.Fe = self.Fe / self.experiment.FeMax
        return self
Example #21
def fit_sgd(X, Y, epoch=1000, lr=0.0001):
    w = np.zeros(X.shape[1])
    b = 0
    for epoch in range(epoch):
        for x, y in zip(X, Y):  # mini batches of size 1
            pred = squeeze(t(w).dot(t(x)) + b)
            err = (y - pred)
            grad_w = (-2 * x) * err
            grad_b = (-2) * err
            w = w - lr * grad_w
            b = b - lr * grad_b

    def f(x):
        return squeeze(t(w).dot(t(x)) + b)

    return f
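A minimal usage sketch fitting a noiseless line, assuming `squeeze` and `t` are numpy.squeeze and numpy.transpose:

import numpy as np
from numpy import squeeze, transpose as t

X = np.linspace(0, 1, 50).reshape(-1, 1)
Y = 3 * X[:, 0] + 2
f = fit_sgd(X, Y, epoch=2000, lr=0.01)
print(f(np.array([0.5])))  # ~3.5 once w -> 3 and b -> 2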
Example #22
def verify_equilibrum(q, z, f, p):
    # Maximum deviations from Kirchhoff's laws
    tol_debits = max(abs(dot(A, q) - f))
    tol_pression = max(abs(dot(t(A), p) + z))
    # Display
    print("Checking the network equilibrium equations")
    print("On the flow rates: {}".format(tol_debits))
    print("On the pressures: {}".format(tol_pression))
Example #23
def my_nlrm(y, x, beta_0, v_0, h_0, nu_0):
    b0_cov = nu_0 * h_0**(-1) / (nu_0 - 2) * v_0  # analogue of (3.16) from Koop (2003)
    b0_std = np.array([[sqrt(var)] for var in diag(b0_cov)])
    h0_std = sqrt(2 * h_0 / nu_0)  # analogue of the square root of (3.19) from Koop (2003)

    n = len(y)  # number of observations

    b_ols = inv(t(x) @ x) @ t(x) @ y  # (3.5) from Koop (2003)
    nu_ols = n - x.shape[1]  # (3.4) from Koop (2003)
    s2_ols = 1 / nu_ols * t(y - x @ b_ols) @ (y - x @ b_ols)  # (3.6) from Koop (2003)

    v_1 = inv(inv(v_0) + t(x) @ x)  # (3.9) from Koop (2003)
    beta_1 = v_1 @ (inv(v_0) @ beta_0 + t(x) @ x @ b_ols)  # (3.11) from Koop (2003)
    nu_1 = nu_0 + n  # (3.12) from Koop (2003)

    h_1 = nu_1 / (nu_0 / h_0 + nu_ols * s2_ols +
                  t(b_ols - beta_0) @ inv(v_0 + inv(t(x) @ x)) @ (b_ols - beta_0))
    # (3.13) from Koop (2003), where h_1 = s2_1^-1

    b1_cov = nu_1 / h_1 / (nu_1 - 2) * v_1  # (3.16) from Koop (2003)
    b1_std = np.array([[sqrt(var)] for var in diag(b1_cov)])
    h1_std = sqrt(2 * h_1 / nu_1)  # square root of (3.19) from Koop (2003)

    log_c = gammaln(nu_1 / 2) + nu_0 / 2 * log(nu_0 / h_0) \
        - gammaln(nu_0 / 2) - n / 2 * log(pi)
    # logarithm of (3.35) from Koop (2003)
    log_ml = log_c + 1 / 2 * (log(det(v_1)) - log(det(v_0))) \
        - nu_1 / 2 * log(nu_1 / h_1)
    # logarithm of (3.34) from Koop (2003)

    results = {
        'x': x,
        'y': y,
        'beta_0': beta_0,
        'v_0': v_0,
        'h_0': h_0,
        'nu_0': nu_0,
        'b0_cov': b0_cov,
        'b0_std': b0_std,
        'h0_std': h0_std,
        'n': n,
        'b_ols': b_ols,
        'nu_ols': nu_ols,
        's2_ols': s2_ols,
        'beta_1': beta_1,
        'h_1': h_1,
        'v_1': v_1,
        'nu_1': nu_1,
        'b1_cov': b1_cov,
        'b1_std': b1_std,
        'h1_std': h1_std,
        'log_c': log_c,
        'log_ml': log_ml
    }

    return results
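A minimal usage sketch on simulated data, assuming `inv`, `det`, `diag`, `sqrt`, `log`, `pi` and `gammaln` come from numpy and scipy.special, as the names in the snippet suggest:

import numpy as np
from numpy import diag, log, pi, sqrt
from numpy import transpose as t
from numpy.linalg import inv, det
from scipy.special import gammaln

rng = np.random.default_rng(0)
x = np.c_[np.ones(100), rng.normal(size=100)]
y = x @ np.array([1.0, 2.0]) + rng.normal(size=100)
out = my_nlrm(y, x, beta_0=np.zeros(2), v_0=np.eye(2), h_0=1.0, nu_0=5)
print(out['beta_1'])  # posterior mean of beta, close to [1, 2]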
Example #24
def my_nlrm(y, X, beta_0, V_0, h_0, nu_0):
    # Characteristics of the prior densities
    # Prior covariance matrix for beta
    b0_cov = nu_0 * (1 / h_0) / (nu_0 - 2) * V_0  # analogue of (3.16) from Koop (2003)
    b0_std = [sqrt(cov) for cov in diag(b0_cov)]

    # Prior standard deviation for h
    h0_std = sqrt(2 * h_0 / nu_0)  # analogue of (3.19) from Koop (2003)

    # Compute the posterior hyperparameters
    N = len(y)

    # OLS estimates
    b_OLS = inv(t(X) @ X) @ t(X) @ y  # (3.5) from Koop (2003)
    nu_OLS = N - shape(X)[1]  # (3.4) from Koop (2003)
    s2_OLS = t(y - dot(X, b_OLS)) @ (y - dot(X, b_OLS)) / nu_OLS  # (3.6) from Koop (2003)

    # Posterior hyperparameters
    V_1 = inv(inv(V_0) + (t(X) @ X))  # (3.10) from Koop (2003)
    beta_1 = V_1 @ (inv(V_0) @ beta_0 + t(X) @ dot(X, b_OLS))  # (3.11) from Koop (2003)
    beta_1 = asmatrix(beta_1)
    nu_1 = nu_0 + N  # (3.12) from Koop (2003)
    h_1 = nu_1 / (nu_0 / h_0 + nu_OLS * s2_OLS +
                  t(b_OLS - beta_0) @ inv(V_0 + inv(t(X) @ X)) @ (b_OLS - beta_0))
    # (3.13) from Koop (2003)

    # Posterior covariance matrix and standard deviations
    b1_cov = (nu_1 * 1 / h_1) / (nu_1 - 2) * V_1  # (3.16) from Koop (2003)
    b1_std = [sqrt(cov) for cov in diag(b1_cov)]

    # Posterior standard deviation for the error precision h
    h1_std = sqrt(2 * h_1 / nu_1)  # (3.19) from Koop (2003), square-rooted

    # Log marginal likelihood
    log_c = lgamma(nu_1 / 2) + nu_0 / 2 * log(nu_0 / h_0) - \
        lgamma(nu_0 / 2) - N / 2 * log(pi)  # (3.35) from Koop (2003), in logs
    log_ML = log_c + 1 / 2 * (log(det(V_1)) - log(det(V_0))) - \
        nu_1 / 2 * log(nu_1 / h_1)  # (3.34) from Koop (2003), in logs

    # Store the results
    results = {
        'b0_cov': b0_cov,
        'b0_std': b0_std,
        'h0_std': h0_std,
        'beta_1': beta_1,
        'h_1': h_1,
        'V_1': V_1,
        'nu_1': nu_1,
        'b1_cov': b1_cov,
        'b1_std': b1_std,
        'h1_std': h1_std,
        'log_ML': log_ML
    }

    return results
Example #25
def oracle(u, compute_gradient=True, compute_hessian=False):
    q, delta = q_hat(u)
    # Objective
    # take the negative of the true objective to turn it into a minimization problem
    loss = -(1./3*dot(q, r*q*np.abs(q)) + dot(pr, dot(Ar, q)) + dot(u, dot(Ad, q) - fd))
    # Derivative of the objective with respect to u
    # again negated to match the minimization convention
    gradient = -(dot(Ad, q) - fd) if compute_gradient else None
    hessian = -dot(dot(Ad, delta), t(Ad)) if compute_hessian else None
    
    return loss, gradient, hessian
Example #26
def g_PY_Beta_python(Nu, Beta, Omega, Y, SigmaY, B0, Ngene, Ncell, Nsample):
    # Ngene by Ncell by Nsample
    Exp = t(np.tile(ExpQ(Nu, Beta, Omega, Ngene, Ncell, Nsample)[:,:,np.newaxis], [1,1,Ncell]), (0,2,1))
    Var = t(np.tile(VarQ(Nu, Beta, Omega, Ngene, Ncell, Nsample)[:,:,np.newaxis], [1,1,Ncell]), (0,2,1))
        
    # Nsample by Ncell by Ngene
    g_Exp = g_Exp_Beta(Nu, Omega, Beta, B0, Ngene, Ncell, Nsample)
    g_Var = g_Var_Beta(Nu, Omega, Beta, B0, Ngene, Ncell, Nsample)
        
    # Nsample by Ncell by Ngene
    a = (g_Var * t(Exp, (2,1,0)) - 2*g_Exp*t(Var, (2,1,0)))/t(np.power(Exp,3), (2,1,0))
    b = - (\
            t(np.tile(Y[:,:,np.newaxis], [1,1,Ncell]), (1,2,0))\
            - t(np.log(Exp+0.000001), (2,1,0))- t(Var/(2*np.square(Exp)), (2,1,0))\
        )*(2*g_Exp/t(Exp, (2,1,0)) + a)

    grad_PY = -np.sum( 0.5 / t(np.tile(np.square(SigmaY)[:,:,np.newaxis], [1,1,Ncell]), (1,2,0)) \
           * ( a + b ), axis=2)
    return grad_PY
Example #27
def fit_normal_equation(X, Y):
    X = np.c_[np.ones((len(X), 1)), X]  # add a column of ones for the bias term
    tX = t(X)
    w = inv(tX.dot(X)).dot(tX).dot(Y)

    def f(x):
        x = np.c_[np.ones((len(x), 1)), x]  # add a column of ones for the bias term
        return squeeze(t(w).dot(t(x)))

    return f
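A minimal usage sketch, assuming `inv`, `squeeze` and `t` are numpy.linalg.inv, numpy.squeeze and numpy.transpose:

import numpy as np
from numpy import squeeze, transpose as t
from numpy.linalg import inv

X = np.linspace(0, 1, 20).reshape(-1, 1)
Y = 3 * X[:, 0] + 2
f = fit_normal_equation(X, Y)
print(f(np.array([[0.5]])))  # ~3.5: exact recovery on noiseless data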
Example #28
def g_Exp_Beta(Nu, Omega, Beta, B0, Ngene, Ncell, Nsample):
    
    ExpX = np.exp(Nu)
    for i in range(Nsample):
        ExpX[i,:,:] = ExpX[i,:,:]*np.exp(0.5*np.square(Omega)) #Nsample by Ngene by Ncell
    B0mat = np.empty(Beta.shape)
    for c in range(Ncell):
        B0mat[:,c] = Beta[:,c]/np.square(B0)
    #B0mat = np.dot(B0mat, t(ExpX)) # Nsample by Ngene
    tmp = np.empty((Nsample, Ngene))
    for i in range(Nsample):
        tmp[i,:] = np.dot(B0mat[i,:], t(ExpX[i,:,:]))
    B0mat = tmp

    g_Exp = np.empty((Nsample, Ncell, Ngene))

    for s in range(Nsample):
        for c in range(Ncell):
            g_Exp[s,c,:] = t(ExpX[s,:,c] / B0[s]) - B0mat[s,:]


    return g_Exp
Example #29
def Optimize(logY, SigmaY, Mu0, Alpha, Alpha0, Beta0, Kappa0, Nu_Init, Omega_Init, Nsample, Ncell, Init_Fraction):
    Beta_Init = np.random.gamma(shape=1, size=(Nsample, Ncell)) * 0.1 + t(Init_Fraction) * 10
    obs = BLADE(logY, SigmaY, Mu0, Alpha, Alpha0, Beta0, Kappa0,
            Nu_Init, Omega_Init, Beta_Init, fix_Nu=True, fix_Omega=True)
    obs.Optimize()
    
    #obs.Fix_par['Beta'] = True
    obs.Fix_par['Nu'] = False
    obs.Fix_par['Omega'] = False
    obs.Optimize()

    #obs.Fix_par['Beta'] = False
    #obs.Optimize()
    return obs
Example #30
    def CreateLmComp(self):
        # compute components for linear model (drift, autocorrelation, projection of drift)

        # drift
        self.S = self.drift(np.arange(0, self.duration, self.TR))  #[tp x 1]
        self.S = np.matrix(self.S)

        # square of the whitening matrix
        base = [1 + self.rho**2, -1 * self.rho] + [0] * (self.tp - 2)
        self.V2 = scipy.linalg.toeplitz(base)
        self.V2[0, 0] = 1
        self.V2[self.tp - 1, self.tp - 1] = 1
        self.V2 = np.matrix(self.V2)
        self.V = scipy.linalg.sqrtm(self.V2)
        P = t(self.S) * np.linalg.pinv(self.S * t(self.S)) * self.S

        self.white = t(self.V) * (np.eye(self.tp) - P) * self.V

        # orthogonal projection of whitened drift
        #VS = self.V*self.S
        #self.Pvs = reduce(np.dot,[VS,np.linalg.pinv(np.dot(t(VS),VS)),t(VS)])

        return self
Example #31
    def __init__(self, dfy, dfX, numeigen=0, minproportion=0, vce=None, cluster=None):
        print("PCA Regression:")
        self.vce = vce
        self.cluster = cluster
        self.varlist = dfX.columns
        self.X = dfX.values
        self.Xt = self.X.transpose()
        self.XtX = self.Xt @ self.X
        self.lam, self.v = np.linalg.eigh(self.XtX)
        idx = self.lam.argsort()[::-1]
        self.v = self.v[:, idx]
        self.lam = self.lam[idx]
        self.lp = self.lam / sum(self.lam)
        self.dfA = pd.DataFrame(self.X @ self.v, columns=np.arange(len(self.lam)) + 1)
        self.l = len(self.lam)
        if numeigen > 0 and minproportion > 0:
            print("Error, only one is required")
            pass
        elif minproportion > 0:
            proportion = 0
            self.num = 0
            for i in range(len(self.lp)):
                self.num = self.num + 1
                proportion = proportion + self.lp[i]
                if proportion >= minproportion:
                    break
        elif numeigen > 0:
            self.num = numeigen
        else:
            print("Selecting all components")
            self.num = len(self.lam)
        print("Number of components selected: {}".format(self.num))
        self.est = OLS(dfy, self.dfA.iloc[:, 0:self.num], nocons=True, vce=self.vce, cluster=self.cluster)
        self.g = self.est.b
        self.SEg = self.est.SEb
        self.b = self.v[:, 0:self.num] @ self.g
        self.Varb = self.v[:, 0:self.num] @ self.est.Varb1 @ self.v[:, 0:self.num].transpose()
        if np.any(np.diag(self.Varb) < 0):
            print("Non Positive Semi-Definite VCE Matrix! Cameron, Gelbach & Miller (2011) Transformation Used")
            lb, vb = eigh(self.Varb)
            idx = lb.argsort()[::-1]
            vb = vb[:, idx]
            lb = lb[idx]
            for i in range(len(lb)):
                lb[i] = max(0, lb[i])
            diag = lb * np.identity(len(lb))
            self.Varb = vb @ diag @ t(vb)
        self.SEb = np.sqrt(np.diag(self.Varb))
        self.t = self.b / self.SEb
        self.pval = 2 * ss.t.cdf(-abs(self.t), self.est.df)
Example #33
def compute_hydraulic_d(pd):
    # Node pressures
    p = np.zeros(m)
    p[:mr] = pr
    p[mr:m] = pd
    # Arc head losses
    z = dot(-t(A), p)
    # Arc flow rates
    q = z / np.sqrt(r * abs(z))
    # Node fluxes
    f = np.zeros(m)
    f[:mr] = dot(Ar, q)
    f[mr:m] = fd

    return q, z, f, p
Example #34
def diag(mat):
    return t(matrix(diagonal(matrix(mat))))
Example #35
    def downWin(self):
        x = t(self.board).tolist()
        return self.straightWin(x)
Example #36
def A_init(dim_hid):
	A = rd.uniform(size = (dim_hid+1,dim_hid+1))
	A[:,0] = 0
	A[0,1:(dim_hid+1)] = 1./dim_hid
	A = t(t(A) / np.sum(A, axis = 1))
	return A