Code example #1
File: pglasso.py Project: jingkungao/PGlasso
def solveBlockGlasso(signal):
    start = int(signal[0]) # inclusive
    S_Matrix  = S_Matrix_bc.value
    W_matrix = W_Matrix_bc.value
    old_W = np.copy(W_matrix)
    end   = min(int(signal[1]),S_Matrix.shape[0]) # non-inclusive
    deltamatrix = np.zeros(S_Matrix.shape)
    NN = S_Matrix.shape[0]
    for n in range(start,end):
        W11 = np.delete(W_matrix,n,0)
        W11 = np.delete(W11,n,1)
        Z   = linalg.sqrtm(W11)

        s11 = S_Matrix[:,n]
        s11 = np.delete(s11,n)
        Y   = np.dot(nplinalg.inv(linalg.sqrtm(W11)),s11)
        Y = np.real(Y)
        Z = np.real(Z)
        B = lasso(Z,Y,beta_value)

        updated_column = np.dot(W11,B)

        matrix_ind = np.array(range(0,NN))
        matrix_ind = np.delete(matrix_ind,n)
        column_ind = 0
        for k in matrix_ind:
            deltamatrix[k,n] = updated_column[column_ind] - W_matrix[k,n]
            deltamatrix[n,k] = updated_column[column_ind] - W_matrix[k,n]
            W_matrix[k,n] = updated_column[column_ind]
            W_matrix[n,k] = updated_column[column_ind]
            column_ind = column_ind + 1
    return deltamatrix  # assumed return; the tail of this snippet was truncated
Code example #2
File: HSICTestObject.py Project: oxmlcs/kerpy
 def compute_induced_kernel_matrix_on_data(self,data_x,data_y):
     '''Z follows the same distribution as X; W follows that of Y.
     The current data generating methods we use 
     generate X and Y at the same time. '''
     size_induced_set = max(self.num_inducex,self.num_inducey)
     #print "size_induce_set", size_induced_set
     if self.data_generator is None:
         subsample_idx = np.random.randint(self.num_samples, size=size_induced_set)
         self.data_z = data_x[subsample_idx,:]
         self.data_w = data_y[subsample_idx,:]
     else:
         self.data_z, self.data_w = self.data_generator(size_induced_set)
         self.data_z = self.data_z[:self.num_inducex, :]  # keep only the first num_inducex rows (the original indexed without assigning)
         self.data_w = self.data_w[:self.num_inducey, :]
     #print 'Induce Set'
     if self.kernelX_use_median:
         sigmax = self.kernelX.get_sigma_median_heuristic(data_x)
         self.kernelX.set_width(float(sigmax))
     if self.kernelY_use_median:
         sigmay = self.kernelY.get_sigma_median_heuristic(data_y)
         self.kernelY.set_width(float(sigmay))
     Kxz = self.kernelX.kernel(data_x,self.data_z)
     Kzz = self.kernelX.kernel(self.data_z)
     #R = inv(sqrtm(Kzz))
     R = inv(sqrtm(Kzz + np.eye(np.shape(Kzz)[0])*10**(-6)))
     phix = Kxz.dot(R)
     Kyw = self.kernelY.kernel(data_y,self.data_w)
     Kww = self.kernelY.kernel(self.data_w)
     #S = inv(sqrtm(Kww))
     S = inv(sqrtm(Kww + np.eye(np.shape(Kww)[0])*10**(-6)))
     phiy = Kyw.dot(S)
     return phix, phiy
Code example #3
File: pca.py Project: molmod/yaff
def pca_similarity(covar_a, covar_b):
    """
        Calculates the similarity between the two covariance matrices

        **Arguments:**

        covar_a
            The first covariance matrix.

        covar_b
            The second covariance matrix.
    """
    # Take the square root of the symmetric matrices
    a_sq = spla.sqrtm(covar_a)
    b_sq = spla.sqrtm(covar_b)

    # Check for imaginary entries
    import warnings
    for mat in [a_sq, b_sq]:
        max_imag = np.amax(np.abs(np.imag(mat)))
        mean_real = np.mean(np.abs(np.real(mat)))
        if max_imag / mean_real > 1e-6:
            warnings.warn('Covariance matrix is not diagonally dominant')

    # Return the PCA similarity (1 - PCA distance)
    return 1 - np.sqrt(np.trace(np.dot(a_sq-b_sq, a_sq-b_sq))/(np.trace(covar_a+covar_b)))
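A minimal usage sketch (my addition, not part of the yaff project): compare two randomly generated covariance matrices; identical inputs give a similarity of exactly 1.

import numpy as np
import scipy.linalg as spla

def random_covariance(n, seed):
    rng = np.random.RandomState(seed)
    A = rng.randn(n, n)
    return A.dot(A.T) + n * np.eye(n)  # symmetric positive definite

covar_a = random_covariance(6, seed=0)
covar_b = random_covariance(6, seed=1)
print(pca_similarity(covar_a, covar_a))  # 1.0 for identical matrices
print(pca_similarity(covar_a, covar_b))  # < 1.0 for differing matrices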
Code example #4
File: rcca_test.py Project: cvpapero16/pose_cca
def rcca(X, Y, reg):   
    X = np.array(X)
    Y = np.array(Y)
    n, p = X.shape
    n, q = Y.shape
        
    # zero mean
    X = X - X.mean(axis=0)
    Y = Y - Y.mean(axis=0)
        
    # covariances
    S = np.cov(X.T, Y.T, bias=1)
    
    SXX = S[:p,:p]
    SYY = S[p:,p:]
    SXY = S[:p,p:]

    ux, lx, vxt = SLA.svd(SXX)
    #uy, ly, vyt = SLA.svd(SYY)
    
    #print lx
    # regularization
    SXX = SXX + np.diag(np.ones(p) * reg)
    SYY = SYY + np.diag(np.ones(q) * reg)  # SYY is q x q, so the regularizer must use q

    sqx = SLA.sqrtm(SLA.inv(SXX)) # SXX^(-1/2)
    sqy = SLA.sqrtm(SLA.inv(SYY)) # SYY^(-1/2)
    M = np.dot(np.dot(sqx, SXY), sqy.T) # SXX^(-1/2) * SXY * SYY^(-T/2)
    A, r, Bh = SLA.svd(M, full_matrices=False)
    B = Bh.T      
    #r = self.reg*r
    return r[0], lx[0], lx[0] 
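A usage sketch (my addition, assuming SLA is bound to scipy.linalg as in this project's other files): with one view a noisy linear map of the other, the leading canonical correlation r[0] comes out close to 1.

import numpy as np
import scipy.linalg as SLA

rng = np.random.RandomState(0)
X = rng.randn(500, 3)
Y = X.dot(rng.randn(3, 3)) + 0.1 * rng.randn(500, 3)  # Y: noisy linear map of X

r0, lx0, _ = rcca(X, Y, reg=0.01)
print(r0)   # leading canonical correlation, close to 1
print(lx0)  # largest singular value of SXX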
Code example #5
File: quad_form.py Project: BvanP/cvxpy
def quad_form(x, P):
    x,P = map(Expression.cast_to_const, (x,P))
    # Check dimensions.
    n = P.size[0]
    if P.size[1] != n or x.size != (n,1):
        raise Exception("Invalid dimensions for arguments.")
    if x.curvature.is_constant():
        return x.T*P*x
    elif P.curvature.is_constant():
        np_intf = intf.get_matrix_interface(np.ndarray)
        P = np_intf.const_to_matrix(P.value)
        # Replace P with symmetric version.
        P = (P + P.T)/2
        # Check if P is PSD.
        eigvals = LA.eigvalsh(P)
        if min(eigvals) > 0:
            P_sqrt = Constant(LA.sqrtm(P).real)
            return square(norm2(P_sqrt*x))
        elif max(eigvals) < 0:
            P_sqrt = Constant(LA.sqrtm(-P).real)
            return -square(norm2(P_sqrt*x))
        else:
            raise Exception("P has both positive and negative eigenvalues.")
    else:
        raise Exception("At least one argument to quad_form must be constant.")
Code example #6
File: dcca_numpy.py Project: Chrisdy/deepcca
def cca(H1, H2):
    H1bar = copy.deepcopy(H1)
    H1bar = H1bar-H1bar.mean(axis=0)
    H2bar = copy.deepcopy(H2)
    H2bar = H2bar-H2bar.mean(axis=0)
    H1bar = H1bar.T
    H2bar = H2bar.T
    H1bar += np.random.random(H1bar.shape)*0.00001
    H2bar += np.random.random(H2bar.shape)*0.00001
    r1 = 0.00000001
    m = H1.shape[0]
    #H1bar = H1 - (1.0/m)*np.dot(H1, np.ones((m,m), dtype=np.float32))
    #H2bar = H2 - (1.0/m)*np.dot(H2, np.ones((m,m), dtype=np.float32))
    SigmaHat12 = (1.0/(m-1))*np.dot(H1bar, H2bar.T)
    SigmaHat11 = (1.0/(m-1))*np.dot(H1bar, H1bar.T)
    SigmaHat11 = SigmaHat11 + r1*np.identity(SigmaHat11.shape[0], dtype=np.float32)
    SigmaHat22 = (1.0/(m-1))*np.dot(H2bar, H2bar.T)
    SigmaHat22 = SigmaHat22 + r1*np.identity(SigmaHat22.shape[0], dtype=np.float32)
    SigmaHat11_2=mat_pow(SigmaHat11).real.astype(np.float32)
    SigmaHat22_2=mat_pow(SigmaHat22).real.astype(np.float32)
    ##TMP = np.dot(SigmaHat12, SigmaHat22_2) #unstable
    TMP2 = stable_inverse_A_dot_Bneg1(SigmaHat12, sqrtm(SigmaHat22))#np.dot(SigmaHat12, SigmaHat22_2)
    TMP3 = stable_inverse_A_dot_Bneg1_cholesky(SigmaHat12, sqrtm(SigmaHat22))#np.dot(SigmaHat12, SigmaHat22_2)

    ##Tval = np.dot(SigmaHat11_2, TMP) #unstable
    Tval = stable_inverse_Aneg1_dot_B(sqrtm(SigmaHat11), TMP2)
    Tval3 = stable_inverse_Aneg1_dot_B_cholesky(sqrtm(SigmaHat11), TMP3)

    ##U, D, V, = np.linalg.svd(Tval)

    ## corr =  np.trace(np.dot(Tval.T, Tval))**(0.5) #wrong
    corr =  np.trace(sqrtm(np.dot(Tval.T, Tval)))
    return corr
Code example #7
File: cca4.py Project: cvpapero/canotest
    def dataNorm(self):
        SXX = np.cov(self.X)
        U, l, Ut = LA.svd(SXX, full_matrices=True) 
        H = np.dot(LA.sqrtm(LA.inv(np.diag(l))),Ut)
        self.nX = np.dot(H,self.X)

        #print np.cov(self.nX)
        #print "mean:"
        #print np.mean(self.nX)

        SYY = np.cov(self.Y)
        U, l, Ut = LA.svd(SYY, full_matrices=True) 
        H = np.dot(LA.sqrtm(LA.inv(np.diag(l))),Ut)
        #print "H"
        #print H
        self.nY = np.dot(H,self.Y)
        #print np.cov(self.nY)

        print "dataNorm_X:"
        for i in range(len(self.nX)):
            print(self.nX[i])
        print("---")

        print "dataNorm_Y:"
        for i in range(len(self.nY)):
            print(self.nY[i])
        print("---")
Code example #8
def gsvd(a, m, w):
	"""
	:param a: Matrix to GSVD
	:param m: 1st Constraint, (u.T * m * u) = I
	:param w: 2nd Constraint, (v.T * w * v) = I
	:return: (u ,s, v)
	"""

	(aHeight, aWidth) = a.shape
	(mHeight, mWidth) = m.shape
	(wHeight, wWidth) = w.shape

	assert(aHeight == mHeight)
	assert(aWidth == wWidth)

	mSqrt = sqrtm(m)
	wSqrt = sqrtm(w)


	mSqrtInv = np.linalg.inv(mSqrt)
	wSqrtInv = np.linalg.inv(wSqrt)

	_a = np.dot(np.dot(mSqrt, a), wSqrt)

	(_u, _s, _v) = np.linalg.svd(_a)

	u = np.dot(mSqrtInv, _u)
	v = np.dot(wSqrtInv, _v.T).T
	s = _s

	return (u, s, v)
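A usage sketch (my addition): with symmetric positive definite m and w, the returned u satisfies u.T @ m @ u ≈ I; note that v comes back in row form (like numpy's vh), so the second constraint holds as v @ w @ v.T ≈ I.

import numpy as np
from scipy.linalg import sqrtm

rng = np.random.RandomState(0)
a = rng.randn(4, 4)
Bm, Bw = rng.randn(4, 4), rng.randn(4, 4)
m = Bm.dot(Bm.T) + 4 * np.eye(4)  # SPD constraint matrices
w = Bw.dot(Bw.T) + 4 * np.eye(4)

u, s, v = gsvd(a, m, w)
print(np.allclose(u.T.dot(m).dot(u), np.eye(4)))  # True
print(np.allclose(v.dot(w).dot(v.T), np.eye(4)))  # True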
Code example #9
File: testMSI.py Project: fferrara/pyassistive
def MSI(X, Y):
    try: 
        n, p = X.shape
        n, q = Y.shape
    except ValueError as v:
        return []

    X = X.astype('float32', copy=False)
    X -= X.mean(axis=0)
    X /= np.max(np.abs(X))
    Y = Y.astype('float32', copy=False)
    Y -= Y.mean(axis=0)
    Y /= np.max(np.abs(Y))

    C = np.cov(X.T, Y.T, bias=1)
    CXX = C[:p,:p]
    CYY = C[p:,p:]

    sqx,_ = LA.sqrtm(LA.inv(CXX),False) # SXX^(-1/2)
    sqy,_ = LA.sqrtm(LA.inv(CYY),False) # SYY^(-1/2)

    # build square matrix
    u1 = np.vstack((sqx, np.zeros((sqy.shape[0], sqx.shape[1]))))
    u2 = np.vstack((np.zeros((sqx.shape[0], sqy.shape[1])), sqy))
    U = np.hstack((u1, u2))
    
    R = np.dot(np.dot(U, C), U.T)

    eigvals = LA.eigh(R)[0]
    eigvals /= np.sum(eigvals)
    # Compute index
    return 1 + np.sum(eigvals * np.log(eigvals)) / np.log(eigvals.shape[0])
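A usage sketch (my addition, assuming LA is scipy.linalg): the index is near 0 for independent signal sets and grows as the two sets synchronize.

import numpy as np
import scipy.linalg as LA

rng = np.random.RandomState(0)
X = rng.randn(2000, 3)
Y_dep = X + 0.1 * rng.randn(2000, 3)  # strongly coupled views
Y_ind = rng.randn(2000, 3)            # independent views

print(MSI(X, Y_dep))  # noticeably above 0
print(MSI(X, Y_ind))  # close to 0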
Code example #10
File: pose_cca2.py Project: cvpapero16/pose_cca
    def cca(self, X, Y):
        '''
        Canonical correlation analysis
        http://en.wikipedia.org/wiki/Canonical_correlation
        '''    
        #X = np.array(X)
        #Y = np.array(Y)
        n, p = X.shape
        n, q = Y.shape
        
        # zero mean
        X = X - X.mean(axis=0)
        Y = Y - Y.mean(axis=0)
        
        # covariances
        S = np.cov(X.T, Y.T, bias=1)
        
        SXX = S[:p,:p]
        SYY = S[p:,p:]
        SXY = S[:p,p:]

        #regularization
        SXX = self.add_reg(SXX, self.reg) 
        SYY = self.add_reg(SYY, self.reg)

        sqx = SLA.sqrtm(SLA.inv(SXX)) # SXX^(-1/2)
        sqy = SLA.sqrtm(SLA.inv(SYY)) # SYY^(-1/2)
        M = np.dot(np.dot(sqx, SXY), sqy.T) # SXX^(-1/2) * SXY * SYY^(-T/2)
        A, r, Bh = SLA.svd(M, full_matrices=False)
        B = Bh.T      
        #r = self.reg*r
        return r, A, B
Code example #11
def calc_vecp(l,C_l_hat,C_fl, C_l):

    C_fl_12 = sqrtm(C_fl[l])
    C_l_inv = LA.inv(C_l[l])
    C_l_inv_12= sqrtm(C_l_inv)
    # the order is inverted compared to matlab hamimeche_lewis_likelihood.m line 19

    # line 20 of hamimeche_lewis_likelihood.m
    res = np.dot(C_l_inv_12, np.dot(C_l_hat[l], C_l_inv_12))
    [d, u] = LA.eigh(res)
    d = np.diag(d)  # numpy returns the eigenvalues as a vector; promote to a diagonal matrix
    # np.dot(u, np.dot(np.diag(d), LA.inv(u))) should be equal to res
    # real symmetric matrices are diagonalized by orthogonal matrices (M^t M = 1)

    # this makes a diagonal matrix by applying g(x) to the eigenvalues, equation 10 in Barkats et al
    gd = np.sign(np.diag(d) - 1) * np.sqrt(2 * (np.diag(d) - np.log(np.diag(d)) - 1))
    gd = np.diag(gd)
    # Argument of vecp in equation 8; multiplying from right to left
    X = np.dot(np.transpose(u), C_fl_12)
    X = np.dot(gd, X)
    X = np.dot(u, X)
    X = np.dot(C_fl_12, X)
    # This is the vector of equation 7
    X = vecp(X)

    return X
Code example #12
File: roscca3.py Project: cvpapero/rqt_cca
    def stdNorm(self, U1, U2):
        print "U1"
        print U1
        print "U2"
        print U2

        mat1 = np.matrix(U1).T
        print(mat1)
        print(mat1.mean(axis=1))
        mat1 = mat1 - mat1.mean(axis=1)
        print(mat1)
        mat1cov = np.cov(mat1)
        print(mat1cov)
        p1,l1,p1t = NLA.svd(mat1cov)
        print(p1)
        print(l1)
        print(p1t)
        l1sq = SLA.sqrtm(SLA.inv(np.diag(l1))) 
        snU1 =  np.dot(np.dot(l1sq, p1.T), mat1)

        mat2 = np.matrix(U2).T
        mat2 = mat2 - mat2.mean(axis=1)
        mat2cov = np.cov(mat2)
        p2,l2,p2t = NLA.svd(mat2cov)
        l2sq = SLA.sqrtm(SLA.inv(np.diag(l2))) 
        snU2 =  np.dot(np.dot(l2sq, p2.T), mat2)

        print "cov:"
        print np.cov(snU1)
        print np.cov(snU2)

        return snU1, snU2
Code example #13
File: body_cca_mi.py Project: cvpapero/rqt_cca
 def cca1(self, X, Y):
     '''
     Canonical correlation analysis
     http://en.wikipedia.org/wiki/Canonical_correlation
     '''    
     X = np.array(X)
     Y = np.array(Y)
     n, p = X.shape
     n, q = Y.shape
     
     # zero mean
     X = X - X.mean(axis=0)
     Y = Y - Y.mean(axis=0)
     
     # covariances
     S = np.cov(X.T, Y.T)
     
     # S = np.corrcoef(X.T, Y.T)
     SXX = S[:p,:p]
     SYY = S[p:,p:]
     SXY = S[:p,p:]
     SYX = S[p:,:p]
     
     # 
     sqx = SLA.sqrtm(SLA.inv(SXX)) # SXX^(-1/2)
     sqy = SLA.sqrtm(SLA.inv(SYY)) # SYY^(-1/2)
     M = np.dot(np.dot(sqx, SXY), sqy.T) # SXX^(-1/2) * SXY * SYY^(-T/2)
     A, s, Bh = SLA.svd(M, full_matrices=False)
     B = Bh.T      
     #print np.dot(np.dot(A[:,0].T,SXX),A[:,0])
     return s, A, B
Code example #14
File: test_cano.py Project: cvpapero/canotest
def kcca(X, Y, kernel_x=gaussian_kernel, kernel_y=gaussian_kernel, eta=1.0):
    '''
    Kernel canonical correlation analysis
    http://staff.aist.go.jp/s.akaho/papers/ibis00.pdf
    '''
    n, p = X.shape
    n, q = Y.shape

    Kx = DIST.squareform(DIST.pdist(X, kernel_x))
    Ky = DIST.squareform(DIST.pdist(Y, kernel_y))
    J = np.eye(n) - np.ones((n, n)) / n
    M = np.dot(np.dot(Kx.T, J), Ky) / n
    L = np.dot(np.dot(Kx.T, J), Kx) / n + eta * Kx
    N = np.dot(np.dot(Ky.T, J), Ky) / n + eta * Ky

    sqx = LA.sqrtm(LA.inv(L))
    sqy = LA.sqrtm(LA.inv(N))

    a = np.dot(np.dot(sqx, M), sqy.T)
    A, s, Bh = LA.svd(a, full_matrices=False)
    B = Bh.T

    # U = np.dot(np.dot(A.T, sqx), X).T
    # V = np.dot(np.dot(B.T, sqy), Y).T

    return s, A, B
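A usage sketch (my addition): gaussian_kernel is imported elsewhere in this project, so a plain RBF with unit bandwidth stands in for it here; it must be defined before the def above is executed, since it appears as a default argument.

import numpy as np
import scipy.linalg as LA
import scipy.spatial.distance as DIST

def gaussian_kernel(x, y, sigma=1.0):
    # stand-in RBF kernel; the project's own kernel may differ
    return np.exp(-np.sum((x - y) ** 2) / (2.0 * sigma ** 2))

rng = np.random.RandomState(0)
X = rng.randn(60, 2)
Y = X + 0.2 * rng.randn(60, 2)

s, A, B = kcca(X, Y, eta=1.0)
print(s[:3])  # leading kernel canonical correlations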
Code example #15
File: css2012Funcs.py Project: snowdj/css2012
def gibbs1_swr(S0, P0, P1, T):
    """
    function SA = GIBBS1_SWR(S0,P0,P1,T);

    This file executes the Carter-Kohn backward sampler for the
    Stock-Watson-Romer model.

    S0, P0, P1 are outputs of the forward Kalman filter

    VERIFIED (1x) SL (8-9-13)
    """
    A = np.array([[0., 1.], [0., 1.]])

    # initialize arrays for Gibbs sampler
    SA = zeros((2, T))  # artificial states
    SM = zeros((2, 1))  # backward update for conditional mean of state vector
    PM = zeros((2, 2))  # backward update for projection matrix
    P = zeros((2, 2))  # backward update for conditional variance matrix
    wa = np.random.randn(2, T)  # draws for state innovations

    # Backward recursions and sampling
    # Terminal state
    SA[:, T-1] = S0[:, T-1] + np.real(sqrtm(P0[:, :, T-1])).dot(wa[:, T-1])

    # iterating back through the rest of the sample
    for i in range(1, T):
        PM = np.dot(P0[:, :, T-i].dot(A.T), inv(P1[:, :, T-i]))
        P = P0[:, :, T-i] - np.dot(PM.dot(A), P0[:, :, T-i])
        SM = S0[:, T-i-1] + PM.dot(SA[:, T-i] - A.dot(S0[:, T-i-1]))
        SA[:, T-i-1] = SM + np.real(sqrtm(P)).dot(wa[:, T-i-1])

    return SA
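A shape-level usage sketch (my addition): the real inputs are forward Kalman filter outputs, so the arrays below are fabricated only to exercise the sampler; zeros, inv and sqrtm are assumed imported as in the original module.

import numpy as np
from numpy import zeros
from numpy.linalg import inv
from scipy.linalg import sqrtm

T = 10
A = np.array([[0., 1.], [0., 1.]])
S0 = np.random.randn(2, T)
P0 = np.empty((2, 2, T))
P1 = np.empty((2, 2, T))
for t in range(T):
    L = np.random.randn(2, 2)
    P0[:, :, t] = L.dot(L.T) + np.eye(2)                         # SPD filtered covariances
    P1[:, :, t] = A.dot(P0[:, :, t]).dot(A.T) + 0.1 * np.eye(2)  # invertible predicted covariances

SA = gibbs1_swr(S0, P0, P1, T)
print(SA.shape)  # (2, T)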
Code example #16
def BipartiteGraphClusteringK(dist_mat,prefix1,prefix2,K,thresh):
    # build the graph
    G,Local_Seq,Long_Seq = GraphBuild(dist_mat)
    # compute graph laplacian spectrum
    U,S,V,D1,D2 = GraphSVD(G,Local_Seq,Long_Seq)
    # update K if necessary
    #if ((S>0.7).sum()<K):
    #    K = (S>0.7).sum()
    #
    # data matrix
    Z0 = np.round(la.sqrtm(la.inv(D1))*U[:,np.arange(K)],8)
    Z1 = np.round(la.sqrtm(la.inv(D2))*V[:,np.arange(K)],8)
    # compute the centroid
    estimator = KMeans(init='k-means++', n_clusters=K, n_init=10)
    estimator.fit(Z0)
    B = estimator.cluster_centers_
    # compute the distance between long seq and centroid
    Y = cdist(Z1,B)
    # compute the assignment of long seq
    C = Y.argmin(axis=1)
    # output the pairs of long-seqs
    for k in np.arange(K):
        print "#%d"%k + "\t" + ":".join(['%.2f']*len(S)) % tuple(S)
        I = np.where(C==k)[0]
        for a in Long_Seq[I]:
            print(a)
Code example #17
    def test_sqrtm_type_preservation_and_conversion(self):
        # The sqrtm matrix function should preserve the type of a matrix
        # whose eigenvalues are nonnegative with zero imaginary part.
        # Test this preservation for variously structured matrices.
        complex_dtype_chars = ('F', 'D', 'G')
        for matrix_as_list in (
                [[1, 0], [0, 1]],
                [[1, 0], [1, 1]],
                [[2, 1], [1, 1]],
                [[2, 3], [1, 2]],
                [[1, 1], [1, 1]]):

            # check that the spectrum has the expected properties
            W = scipy.linalg.eigvals(matrix_as_list)
            assert_(not any(w.imag or w.real < 0 for w in W))

            # check float type preservation
            A = np.array(matrix_as_list, dtype=float)
            A_sqrtm, info = sqrtm(A, disp=False)
            assert_(A_sqrtm.dtype.char not in complex_dtype_chars)

            # check complex type preservation
            A = np.array(matrix_as_list, dtype=complex)
            A_sqrtm, info = sqrtm(A, disp=False)
            assert_(A_sqrtm.dtype.char in complex_dtype_chars)

            # check float->complex type conversion for the matrix negation
            A = -np.array(matrix_as_list, dtype=float)
            A_sqrtm, info = sqrtm(A, disp=False)
            assert_(A_sqrtm.dtype.char in complex_dtype_chars)
Code example #18
File: nds.py Project: tsmithe/learning-nds
    def __init__(self, F, H, R_v, R_n, x_hat_0, P_0, alpha=1):
        """
        x_hat_0 and P_0 are initial estimates of the state mean and covariance.
        alpha determines the spread of sigma points, and should be small...
        """
        self.F = F
        self.H = H
        self.R_v = R_v
        self.R_n = R_n
        self.x_hat = x_hat_0
        self.P = P_0
        self.S = spla.sqrtm(P_0) #np.linalg.cholesky(P_0)
        self.sqrt_R_v = spla.sqrtm(R_v) #np.linalg.cholesky(R_v)
        self.sqrt_R_n = spla.sqrtm(R_n) #np.linalg.cholesky(R_n)
        self.L = len(x_hat_0)
        self.M = len(H(x_hat_0))
        self.alpha = alpha
        self.beta = 2
        kappa = 3 - self.L
        self.kappa = kappa
        self.Lambda = alpha**2 * (self.L + kappa) - self.L

        # NB: these don't change while we don't augment the sigma points
        self.weights_m = self.weights_m()
        self.weights_c = self.weights_c()
Code example #19
File: pose_cca.py Project: cvpapero16/pose_cca
    def kcca(self, X, Y, kernel_x=gaussian_kernel, kernel_y=gaussian_kernel, eta=1.0):
        n, p = X.shape
        n, q = Y.shape
        
        Kx = DIST.squareform(DIST.pdist(X, kernel_x))
        Ky = DIST.squareform(DIST.pdist(Y, kernel_y))
        J = np.eye(n) - np.ones((n, n)) / n
        M = np.dot(np.dot(Kx.T, J), Ky) / n
        L = np.dot(np.dot(Kx.T, J), Kx) / n + eta * Kx
        N = np.dot(np.dot(Ky.T, J), Ky) / n + eta * Ky


        sqx = SLA.sqrtm(SLA.inv(L))
        sqy = SLA.sqrtm(SLA.inv(N))
        
        a = np.dot(np.dot(sqx, M), sqy.T)
        A, s, Bh = SLA.svd(a, full_matrices=False)
        B = Bh.T
        
        # U = np.dot(np.dot(A.T, sqx), X).T
        # V = np.dot(np.dot(B.T, sqy), Y).T
        print(s.shape)
        print(A.shape)
        print(B.shape)
        return s, A, B
Code example #20
def GraphSVD(G,row_nodes,column_nodes):
    A = bipartite.biadjacency_matrix(G,row_order=row_nodes,column_order=column_nodes)
    D1 = np.diag(np.squeeze(np.asarray(A.sum(1))))
    D2 = np.diag(np.squeeze(np.asarray(A.sum(0))))
    An = la.sqrtm(la.inv(D1))*A*la.sqrtm(la.inv(D2))
    U,S,V = np.linalg.svd(An)
    V = V.T
    return (U,S,V,D1,D2)
Code example #21
File: calc_cca2.py Project: cvpapero/bodys_cca
    def cca(self, X, Y, id1, id2):
        '''
        Canonical correlation analysis
        http://en.wikipedia.org/wiki/Canonical_correlation
        '''    
        #X = np.array(X)
        #Y = np.array(Y)
        #print X.shape
        n, p = X.shape
        n, q = Y.shape
        
        # zero mean
        X = X - X.mean(axis=0)
        Y = Y - Y.mean(axis=0)
        
        # covariances
        S = np.cov(X.T, Y.T, bias=1)
        
        # S = np.corrcoef(X.T, Y.T)
        SXX = S[:p,:p]
        SYY = S[p:,p:]
        SXY = S[:p,p:]
        #SYX = S[p:,:p]
        
        #regularization?
        #Rg = np.diag(np.ones(p)*0.001)
        #SXX = SXX + Rg
        #SYY = SYY + Rg
        #
        sqx = SLA.sqrtm(SLA.inv(SXX)) # SXX^(-1/2)
        sqy = SLA.sqrtm(SLA.inv(SYY)) # SYY^(-1/2)
        M = np.dot(np.dot(sqx, SXY), sqy.T) # SXX^(-1/2) * SXY * SYY^(-T/2)
        A, s, Bh = SLA.svd(M, full_matrices=False)
        B = Bh.T      
        
        """
        vecs = []
        ids = [id1, id2]
        for idx in ids:
            wx = []

            for n in range(self.dtd):
                print "i",i,"n",n
                if n == idx[n]:
                    wx[n] = A[:,n]
                else:
                    wx[n] = np.zeros(self.dtd)
            vecs.append(np.array(wx).T)
        """
        #print np.dot(np.dot(A[:,0].T,SXX),A[:,0])
        z = p if p < q else q
        r = np.zeros([self.dtd])
        wx = np.zeros([self.dtd,self.dtd])
        wy = np.zeros([self.dtd,self.dtd])
        r[:z]=s
        wx[id1,:z] = A
        wy[id2,:z] = B
        return r,wx,wy
Code example #22
File: GOBLin.py Project: mpearmain/CoLinUCB_Revised
	def __init__(self, featureDimension, lambda_, userNum, W):
		self.W = W
		self.userNum = userNum
		self.A = lambda_*np.identity(n = featureDimension*userNum)
		self.b = np.zeros(featureDimension*userNum)
		self.AInv = np.linalg.inv(self.A)

		self.theta = np.dot(self.AInv , self.b)
		self.STBigWInv = sqrtm( np.linalg.inv(np.kron(W, np.identity(n=featureDimension))) )
		self.STBigW = sqrtm(np.kron(W, np.identity(n=featureDimension)))
Code example #23
 def test_blocksizes(self):
     # Make sure I do not goof up the blocksizes when they do not divide n.
     np.random.seed(1234)
     for n in range(1, 8):
         A = np.random.rand(n, n) + 1j*np.random.randn(n, n)
         A_sqrtm_default, info = sqrtm(A, disp=False, blocksize=n)
         assert_allclose(A, np.linalg.matrix_power(A_sqrtm_default, 2))
         for blocksize in range(1, 10):
             A_sqrtm_new, info = sqrtm(A, disp=False, blocksize=blocksize)
             assert_allclose(A_sqrtm_default, A_sqrtm_new)
Code example #24
File: fid_score.py Project: kazk1018/manifold_mixup
def calculate_frechet_distance(mu1, sigma1, mu2, sigma2, eps=1e-6):
    """Numpy implementation of the Frechet Distance.
    The Frechet distance between two multivariate Gaussians X_1 ~ N(mu_1, C_1)
    and X_2 ~ N(mu_2, C_2) is
            d^2 = ||mu_1 - mu_2||^2 + Tr(C_1 + C_2 - 2*sqrt(C_1*C_2)).

    Stable version by Dougal J. Sutherland.

    Params:
    -- mu1   : Numpy array containing the activations of a layer of the
               inception net (like returned by the function 'get_predictions')
               for generated samples.
    -- mu2   : The sample mean over activations, precalculated on a
               representative data set.
    -- sigma1: The covariance matrix over activations for generated samples.
    -- sigma2: The covariance matrix over activations, precalculated on a
               representative data set.

    Returns:
    --   : The Frechet Distance.
    """

    mu1 = np.atleast_1d(mu1)
    mu2 = np.atleast_1d(mu2)

    sigma1 = np.atleast_2d(sigma1)
    sigma2 = np.atleast_2d(sigma2)

    assert mu1.shape == mu2.shape, \
        'Training and test mean vectors have different lengths'
    assert sigma1.shape == sigma2.shape, \
        'Training and test covariances have different dimensions'

    diff = mu1 - mu2

    # Product might be almost singular
    covmean, _ = linalg.sqrtm(sigma1.dot(sigma2), disp=False)
    if not np.isfinite(covmean).all():
        msg = ('fid calculation produces singular product; '
               'adding %s to diagonal of cov estimates') % eps
        print(msg)
        offset = np.eye(sigma1.shape[0]) * eps
        covmean = linalg.sqrtm((sigma1 + offset).dot(sigma2 + offset))

    # Numerical error might give slight imaginary component
    if np.iscomplexobj(covmean):
        if not np.allclose(np.diagonal(covmean).imag, 0, atol=1e-3):
            m = np.max(np.abs(covmean.imag))
            raise ValueError('Imaginary component {}'.format(m))
        covmean = covmean.real

    tr_covmean = np.trace(covmean)

    return (diff.dot(diff) + np.trace(sigma1) +
            np.trace(sigma2) - 2 * tr_covmean)
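A usage sketch (my addition), with random toy activations standing in for real Inception features:

import numpy as np
from scipy import linalg

rng = np.random.RandomState(0)
act1 = rng.randn(1000, 8)        # stand-in activations for set 1
act2 = rng.randn(1000, 8) + 0.5  # a shifted distribution

mu1, sigma1 = act1.mean(axis=0), np.cov(act1, rowvar=False)
mu2, sigma2 = act2.mean(axis=0), np.cov(act2, rowvar=False)

print(calculate_frechet_distance(mu1, sigma1, mu1, sigma1))  # ~0 for identical statistics
print(calculate_frechet_distance(mu1, sigma1, mu2, sigma2))  # ~2.0, dominated by ||mu1 - mu2||^2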
Code example #25
File: channels.py Project: veeresht/CommPy
    def propagate(self, msg):

        """
        Propagates a message through the channel.

        Parameters
        ----------
        msg : 1D ndarray
                Message to propagate.

        Returns
        -------
        channel_output : 2D ndarray
                         Message after application of the fading and addition of noise.
                         channel_output[i] is the i-th received symbol of size nb_rx.

        Raises
        ------
        TypeError
                        If the input message is complex but the channel is real.

        AssertionError
                        If the noise standard deviation noise_std has not been set yet.
        """

        if isinstance(msg[0], complex) and not self.isComplex:
            raise TypeError('Trying to propagate a complex message in a real channel.')
        (nb_vect, mod) = divmod(len(msg), self.nb_tx)

        # Add padding if required
        if mod:
            msg = hstack((msg, zeros(self.nb_tx - mod)))
            nb_vect += 1

        # Reshape msg as vectors sent on each antennas
        msg = msg.reshape(nb_vect, -1)

        # Generate noises
        self.generate_noises((nb_vect, self.nb_rx))

        # Generate uncorrelated channel gains
        dims = (nb_vect, self.nb_rx, self.nb_tx)
        if self.isComplex:
            self.channel_gains = (standard_normal(dims) + 1j * standard_normal(dims)) * sqrt(0.5)
        else:
            self.channel_gains = standard_normal(dims)

        # Add correlation and mean
        einsum('ij,ajk,lk->ail', sqrtm(self.fading_param[2]), self.channel_gains, sqrtm(self.fading_param[1]),
               out=self.channel_gains, optimize='greedy')
        self.channel_gains += self.fading_param[0]

        # Generate outputs
        self.unnoisy_output = einsum('ijk,ik->ij', self.channel_gains, msg)
        return self.unnoisy_output + self.noises
Code example #26
File: vlad.py Project: nutszebra/ddp
def calcMeanOnSPD(p, q):
  if len(p.shape)!=1:
    sq = sqrtm(p)
    invsq = pinv2(sq)
    F = sqrtm(np.dot(np.dot(sq, q), sq))
    return np.dot(np.dot(invsq,F),invsq)
  else:
    sq = p**0.5
    invsq = 1.0 / sq
    F = (sq * q * sq) **0.5
    return invsq * F * invsq
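A usage sketch (my addition): as written, the function returns the symmetric positive definite G solving G p G = q (for scalars this is sqrt(q/p)), i.e. the geometric mean of p**-1 and q. Note that scipy.linalg.pinv2 was removed in newer SciPy releases; pinv is used as a stand-in below.

import numpy as np
from scipy.linalg import sqrtm, pinv as pinv2  # pinv2 is gone from recent SciPy

rng = np.random.RandomState(0)
Lp, Lq = rng.randn(3, 3), rng.randn(3, 3)
p = Lp.dot(Lp.T) + np.eye(3)  # SPD
q = Lq.dot(Lq.T) + np.eye(3)  # SPD

G = calcMeanOnSPD(p, q)
print(np.allclose(G.dot(p).dot(G), q))  # True: G solves G p G = q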
Code example #27
File: portfoliostats.py Project: venuur/dissertation
def smoothed_bootstrap(f, x, mu, sigma, size):
    """Calculate expected value of function f using a smoothed bootstrap
    from samples x, with sample mean mu and smoothing covariance sigma."""

    n = x.shape[1]
    sigma_root = spla.sqrtm(sigma)
    scaling_matrix = la.inv(spla.sqrtm(np.eye(n) + sigma))
    x_sample = [smoothed_bootstrap_sample(x, mu, sigma, sigma_root, scaling_matrix) for i in range(size)]
    f_sample = [f(xi) for xi in x_sample]
    f_mean = np.mean(f_sample, axis=0)

    return f_mean
Code example #28
File: kdtools.py Project: jarichardson/kdtools
def KD(bandwidth,coords,gridRange,gridRes,weights=[]):
	'''
	Estimates point density using:
	bandwidth - a kernel bandwidth (2x2 covariance matrix)
	coords    - an Nx2 list of coordinates for N points
	gridRange - a 2x2 [[W,E],[S,N]] array
	gridRes   - a 1x2 [X-resolution,Y-resolution] array
	weights   - a length-N list of weights for N points [None]
	
	Outputs X,Y,D: Eastings, Northings, and Densities in a Meshgrid
	format (i.e. X will be tiled, Y will be repeated)
	'''
	
	#If weights are given, test to see that they're valid
	if weights != []:
		if numpy.shape(weights)[0] != numpy.shape(coords)[0]:
			print "error: weight array not same length as coordinate array!"
			print "  cannot create kernel density map."
			return None
	#If weights are not given, make weights even across the board
	else:
		weights = numpy.ones(len(coords))
	
	weightaverage = numpy.sum(weights)/len(weights)
	
	detH = linalg.det(linalg.sqrtm(bandwidth)) #determinant of the sqrt bandwidth
	invH = linalg.inv(linalg.sqrtm(bandwidth)) #inverse of the sqrt bandwidth
	
	#constant variable in gaussian pdf
	constant = 2.0*numpy.pi*detH*len(coords) * weightaverage

	#define map grid
	x = numpy.arange(gridRange[0][0],(gridRange[0][1]+gridRes[0]),gridRes[0])
	y = numpy.arange(gridRange[1][0],(gridRange[1][1]+gridRes[1]),gridRes[1])
	X,Y = numpy.meshgrid(x,y)	#X and Y are now tiled to grid
	D = numpy.zeros(numpy.shape(X)) #Density Grid
	dist = numpy.zeros(numpy.shape(X)) #distance matrix grid
	
	#Three nested for-loops with enumerate... Nick Voss would be proud.
	for w,v in enumerate(coords):
		for i,e in enumerate(x):
			for j,n in enumerate(y):
				dx = e-v[0]
				dy = n-v[1]
				dxdy = numpy.dot(invH,numpy.array([[dx],[dy]]))
				dist[j][i] = numpy.dot(numpy.transpose(dxdy),dxdy)[0][0]
		D += numpy.exp(-0.5 * dist) * weights[w]
	
	D /= constant #normalize
	
	return X,Y,D
Code example #29
File: PALutils.py Project: jellis18/PAL
def createGHmatrix(toa, err, res, G, fidelity):
    """
    Create "H" compression matrix as defined in van Haasteren 2013(b).
    Multiplies with "G" matrix to create the "GH" matrix, which can simply replace
    the "G" matrix in all likelihoods which are marginalised over the timing-model


    @param toa: times-of-arrival (in days) for psr
    @param err: error bars on toas (in seconds)
    @param res: residuals (in seconds) of psr
    @param G: G matrix as defined in van Haasteren et al 2013(a)
    @param fidelity: fraction of total sensitivity retained in compressed data

    @return: GH matrix, which can simply replace "G" matrix in likelihood

    """

    # forming the error-bar covariance matrix, sandwiched with G matrices
    GCnoiseG = np.dot(G.T,np.dot(np.diag(err**2.0)*np.eye(len(err)),G))
    
    # forming the unscaled (Agwb=1) covariance matrix of GWB-induced residuals
    tm = createTimeLags(toa, toa)
    Cgwb = createRedNoiseCovarianceMatrix(tm, 1, 13.0/3.0)  # float slope; 13/3 truncates to 4 under Python 2
    GCgwbG = np.dot(G.T, np.dot(Cgwb, G))
    
    # approximate the whitening matrix with the inverse root of the marginalised error-bar matrix
    CgwbMargWhite = np.dot(sl.sqrtm(sl.inv(GCnoiseG)).T, \
                    np.dot(GCgwbG, sl.sqrtm(sl.inv(GCnoiseG))))

    # compute the eigendecomposition of the 'whitened' GWB covariance matrix; 
    # order the eigenvalues largest first
    eigVal,eigVec = sl.eigh(CgwbMargWhite)
    idx = eigVal.argsort()[::-1] 
    eigVal = eigVal[idx]
    eigVec = eigVec[:,idx]
    
    # computing a rough estimate of the GWB amplitude for a strain-spectrum slope of -2/3
    sigma_gwb = np.std(res) * 1e-15
    Amp = (sigma_gwb/(1.37*(10**(-9)))) / (Tspan**(5.0/3.0))
    
    # looping over eigenvalues until the fidelity criterion of van Haasteren 2013(b) 
    # is satisfied; only the 'principal' eigenvectors are retained
    index = np.amax(np.where(np.cumsum((eigVal/(1+(Amp**2.0)*eigVal))**2.0)/ \
                             np.sum((eigVal/(1.0+(Amp**2.0)*eigVal))**2.0).real \
                             <= fidelity)[0]) 
    
    # forming the data-compression matrix
    H = np.dot(sl.sqrtm(sl.inv(GCnoiseG)).real,eigVec.T[:index+1].T.real)
    
    return np.dot(G,H)
Code example #30
File: test_matfuncs.py Project: hildensia/scipy
 def test_bad(self):
     # See http://www.maths.man.ac.uk/~nareports/narep336.ps.gz
     e = 2 ** -5
     se = sqrt(e)
     a = array([[1.0, 0, 0, 1], [0, e, 0, 0], [0, 0, e, 0], [0, 0, 0, 1]])
     sa = array([[1, 0, 0, 0.5], [0, se, 0, 0], [0, 0, se, 0], [0, 0, 0, 1]])
     n = a.shape[0]
     assert_array_almost_equal(dot(sa, sa), a)
     # Check default sqrtm.
     esa = sqrtm(a, disp=False, blocksize=n)[0]
     assert_array_almost_equal(dot(esa, esa), a)
     # Check sqrtm with 2x2 blocks.
     esa = sqrtm(a, disp=False, blocksize=2)[0]
     assert_array_almost_equal(dot(esa, esa), a)
Code example #31
def calculate_frechet(mu1, cov1, mu2, cov2):
    dmu = mu1 - mu2
    cov_mean, _ = linalg.sqrtm(cov1.dot(cov2), disp=False)
    frechet = dmu.dot(dmu) + np.trace(cov1 + cov2 - 2 * cov_mean)
    return frechet
Code example #32
 def test_opposite_sign_complex_eigenvalues(self):
     M = [[2j, 4], [0, -2j]]
     R = [[1+1j, 2], [0, 1-1j]]
     assert_allclose(np.dot(R, R), M, atol=1e-14)
     assert_allclose(sqrtm(M), R, atol=1e-14)
Code example #33
def Q(x, x_dir, y=None, y_dir=None):
    # (reconstructed: the def line and the opening of the same-set branch were
    # cut off in this snippet; the signature is inferred from the calls below)
    if y is None:
        row = col = np.shape(x)[0]
        Q_return = np.zeros((row, col))
        for i in range(row):
            for j in range(col):
                Q_return[i, j] = (x_dir[i] * x_dir[j]) / (x[i] + x[j])
    else:
        row = np.shape(x)[0]
        col = np.shape(y)[0]
        Q_return = np.zeros((row, col))
        for i in range(row):
            for j in range(col):
                Q_return[i, j] = (x_dir[i] * y_dir[j]) / (x[i] - y[j])
    return Q_return


Qz = Q(RHPzeros, zero_dir)
Qp = Q(RHPpoles, pole_dir)
Qzp = Q(RHPzeros, zero_dir, RHPpoles, pole_dir)

A = sqrtm(nplinalg.inv(Qz)).dot(Qzp).dot(sqrtm(nplinalg.inv(Qp)))
_, sv, _ = nplinalg.svd(A)

M_Smin = sqrt(1 + max(np.abs(sv))**2)
print("M_Smin using eq 6.8 = ", np.round(M_Smin, 2))

# alternative because system has only one pole:
M_Smin = 1
for j in range(len(RHPzeros)):
    M_Smin *= np.abs(RHPzeros[j] + RHPpoles[0]) / np.abs(RHPzeros[j] -
                                                         RHPpoles[0])

print('MSmin from alternative calculation = ', M_Smin)
Code example #34
File: generate_data.py Project: zjminglead/py-glm
def make_covariance_matrix(n_features=15):
    A = np.random.normal(size=(n_features, n_features))
    A_sq = np.dot(A.T, A)
    return sqrtm(A_sq)
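A quick check (my addition): sqrtm of the positive semidefinite A.T A is symmetric with nonnegative eigenvalues, hence a valid covariance matrix.

import numpy as np
from scipy.linalg import sqrtm

np.random.seed(0)
C = make_covariance_matrix(n_features=5)
print(np.allclose(C, C.T))                     # symmetric
print(np.all(np.linalg.eigvalsh(C) > -1e-10))  # eigenvalues nonnegative up to roundoff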
Code example #35
 def test_gh7839(self):
     M = np.zeros((2, 2))
     R = np.zeros((2, 2))
     assert_allclose(np.dot(R, R), M, atol=1e-14)
     assert_allclose(sqrtm(M), R, atol=1e-14)
Code example #36
def plotShape(kind, outline=False, title=True, save=False, **kwargs):
    f = mlab.figure(bgcolor=(1,1,1), size=(350,400))    
    spatialSize = float(kwargs['spatialSize'])  
    if outline:        
        for k in [-1,1]:
            for n in [-1,1]:
                L = [np.array([-1,1]), np.array([k,k]), np.array([n,n])]
                for d in range(3):
                    mlab.plot3d(spatialSize/2*L[d], spatialSize/2*L[(d+1)%3], spatialSize/2*L[(d+2)%3])        
    
    if kind == 'solid':
        vertA = kwargs['vertA']
        faceA = kwargs['faceA']
        mlab.triangular_mesh(vertA[:,0], vertA[:,1], vertA[:,2], faceA) 
    elif kind == 'transparent':
        vertA = kwargs['vertA']
        faceA = kwargs['faceA']
        mlab.triangular_mesh(vertA[:,0], vertA[:,1], vertA[:,2], faceA, opacity=0.1) 
    elif kind == 'wireframe':
        vertA = kwargs['vertA']
        faceA = kwargs['faceA']
        mlab.triangular_mesh(vertA[:,0], vertA[:,1], vertA[:,2], faceA, representation='wireframe') 
    elif kind in ["Bool",
                "ScalarArea",
                "AreaNormal",
                "QuadForm",
                "VertexAngularDefect",
                "EdgeAngularDefect"]:  
        mode = None
        features = np.array(kwargs['features'])
        x = kwargs['x']
        y = kwargs['y']
        z = kwargs['z']
        assert len(x) == len(features)  
        N = len(features)
        scalars = np.arange(N) # Key point: set an integer for each point
        colors = np.zeros((N, 4), dtype=np.uint8)        
        colors[:,-1] = 255 # No transparency
        
        if kind == 'Bool':
            colors[:,0] = 0        
            colors[:,1] = 255
            colors[:,2] = 0            
            pts = mlab.quiver3d(x-spatialSize/2, y-spatialSize/2+0.5, z-spatialSize/2+0.5, 
                            np.ones(N), np.zeros(N), np.zeros(N), 
                            scalars=scalars, mode='cube', scale_factor=0.7, line_width=10) 
        elif kind == 'ScalarArea':
            features = features.ravel()
            colors[:,2] = 255
            colors[:,1] = (255*(1-features/np.max(features))).astype(np.uint8)
            colors[:,0] = (255*(1-features/np.max(features))).astype(np.uint8)       
            pts = mlab.quiver3d(x-spatialSize/2, y-spatialSize/2+0.5, z-spatialSize/2+0.5, 
                            np.ones(N), np.zeros(N), np.zeros(N), 
                            scalars=scalars, mode='cube', scale_factor=0.7, line_width=10) 
        elif kind == 'AreaNormal':
            colors[:,2] = 255
            colors[:,1] = 0
            colors[:,0] = 0     
            pts = mlab.quiver3d(x-spatialSize/2+0.5, y-spatialSize/2+0.5, z-spatialSize/2+0.5, 
                        -features[:,0], -features[:,1], -features[:,2], 
                        scalars=scalars, mode='arrow', scale_factor=1.9, line_width=10) 
        elif kind == 'VertexAngularDefect':
            features = features.ravel()
            th = np.pi
            colors[:,0] = (255*(1+np.maximum(-1, np.clip(features, -th, 0)/th))).astype(np.uint8)
            colors[:,1] = (255*(1-np.minimum(1, np.clip(features, 0, th)/th)+
                               np.maximum(-1, np.clip(features, -th, 0)/th))).astype(np.uint8)
            colors[:,2] = (255*(1-np.minimum(1, np.clip(features, 0, th)/th))).astype(np.uint8)      
            pts = mlab.quiver3d(x-spatialSize/2, y-spatialSize/2+0.5, z-spatialSize/2+0.5, 
                        np.ones(N), np.zeros(N), np.zeros(N), 
                        scalars=scalars, mode='cube', scale_factor=0.7, line_width=10)  
        elif kind == 'EdgeAngularDefect':
            features = features.ravel()
            th = np.pi
            colors[:,0] = (255*(1+np.maximum(-1, np.clip(features, -th, 0)/th))).astype(np.uint8)
            colors[:,1] = (255*(1-np.minimum(1, np.clip(features, 0, th)/th)+
                               np.maximum(-1, np.clip(features, -th, 0)/th))).astype(np.uint8)
            colors[:,2] = (255*(1-np.minimum(1, np.clip(features, 0, th)/th))).astype(np.uint8)      
            pts = mlab.quiver3d(x-spatialSize/2, y-spatialSize/2+0.5, z-spatialSize/2+0.5, 
                        np.ones(N), np.zeros(N), np.zeros(N), 
                        scalars=scalars, mode='cube', scale_factor=0.7, line_width=10)  
        elif kind == 'QuadForm':
            mode = 'custom'
            r = 0.5
            phi, theta = np.mgrid[0:np.pi:11j, 0:2*np.pi:11j]
            XYZ = r*np.concatenate([(np.sin(phi)*np.cos(theta)).reshape((1, phi.shape[0], -1)),
                                    (np.sin(phi)*np.sin(theta)).reshape((1, phi.shape[0], -1)),
                                     np.cos(phi).reshape((1, phi.shape[0], -1))], axis=0)
            
            for n in range(N):
                Q = np.array([[features[n,0], features[n,3], features[n,4]],
                              [features[n,3], features[n,1], features[n,5]],
                              [features[n,4], features[n,5], features[n,2]]])
                Q = np.trace(Q)*np.eye(3)-Q+1e-3*np.eye(3)
                                              
                XYZ1 = np.tensordot(linalg.sqrtm(Q), XYZ, axes=(1,0))
                mlab.mesh(x[n]-spatialSize/2+0.5+XYZ1[0], 
                          y[n]-spatialSize/2+0.5+XYZ1[1], 
                          z[n]-spatialSize/2+0.5+XYZ1[2], color=(1,0,0)) 
        else:
            raise NotImplementedError         

        if mode != 'custom':
            pts.glyph.color_mode = 'color_by_scalar' 
            try:
                pts.module_manager.scalar_lut_manager.lut.table = colors
            except Exception:
                pass
        mlab.draw() 
    if title:
        mlab.title(kind, color=(0,0,0), size=0.7, height=0.75)
    
    if save:
        mlab.savefig('pics/'+kind+'.png')    
          
    return f
Code example #37
    def do_when_triggered(self, run_context, run_values):
        tf_logging.info("trigger FrechetInceptionDistanceHook for " +
                        str(self._n_images) + " samples, network " +
                        self.network_name)
        #self.random_samples = self._model.generate(batch_size=self._n_batches)

        real_images_means = {}
        real_images_covs = {}
        sample_means = {}
        sample_covs = {}

        # mean over the other dataset_keys
        for dataset_str in self._datasets_keys:

            real_images_means[dataset_str], sample_means[
                dataset_str] = evaluate_means_over_dataset(
                    run_context.session,
                    self._ds_handle,
                    self._ds_initializers[dataset_str],
                    self._ds_handles[dataset_str], [
                        self._mean_values_real[dataset_str],
                        self._mean_values_sample[dataset_str]
                    ], [
                        self._mean_update_ops_real[dataset_str],
                        self._mean_update_ops_sample[dataset_str]
                    ], [
                        self._mean_reset_ops_real[dataset_str],
                        self._mean_reset_ops_sample[dataset_str]
                    ],
                    max_iterations=self._n_batches)

            real_images_covs[dataset_str], sample_covs[
                dataset_str] = evaluate_means_over_dataset(
                    run_context.session,
                    self._ds_handle,
                    self._ds_initializers[dataset_str],
                    self._ds_handles[dataset_str], [
                        self._cov_values_real[dataset_str],
                        self._cov_values_sample[dataset_str]
                    ], [
                        self._cov_update_ops_real[dataset_str],
                        self._cov_update_ops_sample[dataset_str]
                    ], [
                        self._cov_reset_ops_real[dataset_str],
                        self._cov_reset_ops_sample[dataset_str]
                    ],
                    max_iterations=self._n_batches)

            try:

                # compute fid
                diff = real_images_means[dataset_str] - sample_means[
                    dataset_str]

                # product might be almost singular
                covmean, _ = linalg.sqrtm(real_images_covs[dataset_str].dot(
                    sample_covs[dataset_str]),
                                          disp=False)
                if not np.isfinite(covmean).all():
                    msg = "fid calculation produces singular product; adding %s to diagonal of cov estimates" % eps
                    warnings.warn(msg)
                    offset = np.eye(
                        real_images_covs[dataset_str].shape[0]) * eps
                    covmean = linalg.sqrtm(
                        (real_images_covs[dataset_str] +
                         offset).dot(sample_covs[dataset_str] + offset))

                # numerical error might give slight imaginary component
                if np.iscomplexobj(covmean):
                    if not np.allclose(np.diagonal(covmean).imag, 0,
                                       atol=1e-3):
                        m = np.max(np.abs(covmean.imag))
                        raise ValueError("Imaginary component {}".format(m))

                    covmean = covmean.real

                tr_covmean = np.trace(covmean)

                fid = diff.dot(diff) + np.trace(
                    real_images_covs[dataset_str]) + np.trace(
                        sample_covs[dataset_str]) - 2 * tr_covmean

            except ValueError:
                print(
                    "Error in computing the FrechetInceptionDistance, most likely numerical issues in computing the sqrt of a covariance matrix. Try increasing the number of samples"
                )
                fid = -1.

            self._tensors_values[dataset_str] = [[[fid]]]

        self.log_to_file_and_screen()
Code example #38
def PEAK_MIMO(w_start, w_end, error_poles_direction, wr, deadtime_if=0):
    """
    This function is for multivariable system analysis of controllability.
    It gives the minimum peak values on S and T, with or without dead time.
    R is the expected worst-case reference change, subject to ||R||_2 <= 2;
    wr is the frequency up to which reference tracking is required.
    Pass deadtime_if=1 if the system has dead time.

    Parameters
    ----------
    var : type
        Description (optional).

    Returns
    -------
    var : type
        Description.
    """

    # TODO use mimotf functions
    Zeros_G = zeros(G)
    Poles_G = poles(G)
    print('Zeros: ', Zeros_G)
    print('Poles: ', Poles_G)

    # Just to save unnecessary calculations that is not needed
    # Sensitivity peak of closed loop. eq 6-8 pg 224 skogestad

    if np.sum(Zeros_G) != 0:
        if np.sum(Poles_G) != 0:

            # Two matrices to save all the RHP zeros and poles directions
            yz_direction = np.matrix(
                np.zeros([G(0.001).shape[0], len(Zeros_G)]))
            yp_direction = np.matrix(
                np.zeros([G(0.001).shape[0], len(Poles_G)]))

            for i in range(len(Zeros_G)):

                [U, S,
                 V] = np.linalg.svd(G(Zeros_G[i] + error_poles_direction))
                yz_direction[:, i] = U[:, -1]

            for i in range(len(Poles_G)):
                # error_poles_direction is to prevent the numerical
                # method from breaking
                [U, S,
                 V] = np.linalg.svd(G(Poles_G[i] + error_poles_direction))
                yp_direction[:, i] = U[:, 0]

            yz_mat1 = (
                np.matrix(np.diag(Zeros_G)) *
                np.matrix(np.ones([len(Zeros_G), len(Zeros_G)])))
            yz_mat2 = yz_mat1.T

            Qz = (yz_direction.H * yz_direction) / (yz_mat1 + yz_mat2)

            yp_mat1 = np.matrix(np.diag(Poles_G)) * \
                      np.matrix(np.ones([len(Poles_G), len(Poles_G)]))
            yp_mat2 = yp_mat1.T

            Qp = (yp_direction.H * yp_direction) / (yp_mat1 + yp_mat2)

            yzp_mat1 = np.matrix(np.diag(Zeros_G)) * \
                       np.matrix(np.ones([len(Zeros_G), len(Poles_G)]))
            yzp_mat2 = np.matrix(np.ones([len(Zeros_G), len(Poles_G)])) * \
                       np.matrix(np.diag(Poles_G))

            Qzp = yz_direction.H * yp_direction / (yzp_mat1 - yzp_mat2)

            if deadtime_if == 0:
                # This matrix is the matrix from which the SVD is going to
                # be done to determine the final minimum peak
                pre_mat = (sc_lin.sqrtm((np.linalg.inv(Qz))) * Qzp *
                           (sc_lin.sqrtm(np.linalg.inv(Qp))))

                # Final calculation for the peak value
                Ms_min = np.sqrt(1 + (np.max(np.linalg.svd(pre_mat)[1]))**2)
                print('')
                print('Minimum peak values on T and S without deadtime')
                print('Ms_min = Mt_min = ', Ms_min)
                print('')

            # Skogestad eq 6-16 pg 226 using maximum deadtime per output
            # channel to give tightest lowest bounds
            if deadtime_if == 1:
                # Create vector to be used for the diagonal deadtime matrix
                # containing each outputs' maximum dead time
                # this would ensure tighter bounds on T and S
                # the minimum function is used because all stable systems
                # have dead time with a negative sign

                dead_time_vec_max_row = np.zeros(deadtime()[0].shape[0])

                for i in range(deadtime()[0].shape[0]):
                    dead_time_vec_max_row[i] = np.max(deadtime()[0][i, :])

                def Dead_time_matrix(s, dead_time_vec_max_row):

                    dead_time_matrix = np.diag(
                        np.exp(np.multiply(dead_time_vec_max_row, s)))
                    return dead_time_matrix

                Q_dead = np.zeros([G(0.0001).shape[0], G(0.0001).shape[0]])

                for i in range(len(Poles_G)):
                    for j in range(len(Poles_G)):
                        numerator_mat = (
                            (np.conjugate(yp_direction[:, i])) *
                            Dead_time_matrix(Poles_G[i], dead_time_vec_max_row)
                            * Dead_time_matrix(Poles_G[j],
                                               dead_time_vec_max_row) *
                            yp_direction[:, j]).T

                        denominator_mat = Poles_G[i] + Poles_G[j]

                        Q_dead[i, j] = numerator_mat / denominator_mat

                # Calculating the Mt_min with dead time
                lambda_mat = (sc_lin.sqrtm(np.linalg.pinv(Q_dead)) *
                              (Qp + Qzp * np.linalg.pinv(Qz) *
                               (np.transpose(np.conjugate(Qzp)))) *
                              sc_lin.sqrtm(np.linalg.pinv(Q_dead)))

                Ms_min = np.real(np.max(np.linalg.eig(lambda_mat)[0]))
                print('')
                print('Minimum peak values on T and S with dead time')
                print('Dead time per output channel is for the worst case '
                      'dead time in that channel')
                print('Ms_min = Mt_min = ', Ms_min)
                print('')

        else:
            print('')
            print('Minimum peak values on T and S')
            print('No limits on minimum peak values')
            print('')

    # Eq 6-48 pg 239 for plant with RHP zeros
    # Checking alignment of disturbances and RHP zeros
    RHP_alignment = [
        np.abs(
            np.linalg.svd(G(RHP_Z + error_poles_direction))[0][:, 0].H *
            np.linalg.svd(Gd(RHP_Z + error_poles_direction))[1][0] *
            np.linalg.svd(Gd(RHP_Z + error_poles_direction))[0][:, 0])
        for RHP_Z in Zeros_G
    ]

    print('Checking alignment of process output zeros to disturbances')
    print('These values should be less than 1')
    print(RHP_alignment)
    print('')

    # Checking peak values of KS eq 6-24 pg 229 np.linalg.svd(A)[2][:, 0]
    # Done with less tight lower bounds
    KS_PEAK = [
        np.linalg.norm(
            np.linalg.svd(G(RHP_p + error_poles_direction))[2][:, 0].H *
            np.linalg.pinv(G(RHP_p + error_poles_direction)), 2)
        for RHP_p in Poles_G
    ]
    KS_max = np.max(KS_PEAK)

    print('Lower bound on K')
    print('KS needs to be larger than ', KS_max)
    print('')

    # Eq 6-50 pg 240 from Skogestad, for the simultaneous disturbance matrix
    # Checking input saturation for perfect control for disturbance rejection
    # Checking for maximum disturbance just at steady state

    [U_gd, S_gd, V_gd] = np.linalg.svd(Gd(0.000001))
    y_gd_max = np.max(S_gd) * U_gd[:, 0]
    mod_G_gd_ss = np.max(np.linalg.inv(G(0.000001)) * y_gd_max)

    print('Perfect control input saturation from disturbances')
    print('Needs to be less than 1 ')
    print('Max Norm method')
    print('Checking input saturation at steady state')
    print('This is done by the worst output direction of Gd')
    print(mod_G_gd_ss)
    print('')

    print('Figure 1 is for perfect control for simultaneous disturbances')
    print('All values on each of the graphs should be smaller than 1')
    print('')

    print('Figure 2 is the plot of G**-1 gd')
    print('The values of this plot needs to be smaller or equal to 1')
    print('')

    w = np.logspace(w_start, w_end, 100)

    mod_G_gd = np.zeros(len(w))
    mod_G_Gd = np.zeros([np.shape(G(0.0001))[0], len(w)])

    for i in range(len(w)):
        [U_gd, S_gd, V_gd] = np.linalg.svd(Gd(1j * w[i]))
        gd_m = np.max(S_gd) * U_gd[:, 0]
        mod_G_gd[i] = np.max(np.linalg.pinv(G(1j * w[i])) * gd_m)

        mat_G_Gd = np.linalg.pinv(G(w[i])) * Gd(w[i])
        for j in range(np.shape(mat_G_Gd)[0]):
            mod_G_Gd[j, i] = np.max(mat_G_Gd[j, :])

    # Def for subplotting all the possible variations of mod_G_Gd

    plot_freq_subplot(plt, w, np.ones([2, len(w)]), 'Perfect control Gd', 'r',
                      1)
    plot_freq_subplot(plt, w, mod_G_Gd, 'Perfect control Gd', 'b', 1)

    plt.figure(2)
    plt.title('Input Saturation for perfect control |inv(G)*gd|<= 1')
    plt.xlabel('w')
    plt.ylabel('|inv(G)* gd|')
    plt.semilogx(w, mod_G_gd)
    plt.semilogx([w[0], w[-1]], [1, 1])
    plt.semilogx(w[0], 1.1)

    print('Figure 3 is disturbance condition number')
    print('A large number indicates that the disturbance '
          'is in a bad direction')
    print('')

    # Eq 6-43 pg 238 disturbance condition number
    # this in done over a frequency range to see if there are possible
    # problems at higher frequencies finding yd
    dist_condition_num = [
        np.linalg.svd(G(w_i))[1][0] * np.linalg.svd(
            np.linalg.pinv(G(w_i))[1][0] * np.linalg.svd(Gd(w_i))[1][0] *
            np.linalg.svd(Gd(w_i))[0][:, 0])[1][0] for w_i in w
    ]

    plt.figure(3)
    plt.title('yd Condition number')
    plt.ylabel('condition number')
    plt.xlabel('w')
    plt.loglog(w, dist_condition_num)

    print('Figure 4 is the singular value of a specific output with input '
          'and disturbance direction vector')
    print('The solid blue line needs to be larger than the red line')
    print('This only needs to be checked up to frequencies where |u**H gd| >1')
    print('')

    # Checking input saturation for acceptable control  disturbance rejection
    # Equation 6-55 pg 241 in Skogestad
    # Checking each singular values and the associated input vector with
    # output direction vector of Gd just for square systems for now

    # Revised method including all the possibilities of outputs i
    store_rhs_eq = np.zeros([np.shape(G(0.0001))[0], len(w)])
    store_lhs_eq = np.zeros([np.shape(G(0.0001))[0], len(w)])

    for i in range(len(w)):
        for j in range(np.shape(G(0.0001))[0]):
            store_rhs_eq[j, i] = (np.abs(
                np.linalg.svd(G(w[i]))[2][:, j].H *
                np.max(np.linalg.svd(Gd(w[i]))[1]) *
                np.linalg.svd(Gd(w[i]))[0][:, 0]) - 1)
            store_lhs_eq[j, i] = sc_lin.svd(G(w[i]))[1][j]

    plot_freq_subplot(plt, w, store_rhs_eq, 'Acceptable control eq6-55', 'r',
                      4)
    plot_freq_subplot(plt, w, store_lhs_eq, 'Acceptable control eq6-55', 'b',
                      4)

    print('Figure 5 is to check input saturation for reference changes')
    print('Red line in both graphs needs to be larger than the blue '
          'line for values w < wr')
    print('Shows the wr up to where control is needed')
    print('')

    # Checking input saturation for perfect control with reference change
    # Eq 6-52 pg 241

    # Checking input saturation for perfect control with reference change
    # Another equation for checking input saturation with reference change
    # Eq 6-53 pg 241

    plt.figure(5)
    ref_perfect_const_plot(G, reference_change(), 0.01, w_start, w_end)

    print('Figure 6 is the maximum and minimum singular values of G over '
          'a frequency range')
    print('Figure 6 is also the maximum and minimum singular values of Gd '
          'over a frequency range')
    print('Blue is the minimum values and Red is the maximum singular values')
    print('Plot of Gd should be smaller than 1 else control is needed at '
          'frequencies where Gd is bigger than 1')
    print('')

    # Checking input saturation for acceptable control with reference change
    # Added check for controllability is the minimum and maximum singular
    # values of system transfer function matrix
    # as a function of frequency condition number added to check for how
    # prone the system would be to uncertainty

    singular_min_G = [np.min(np.linalg.svd(G(1j * w_i))[1]) for w_i in w]
    singular_max_G = [np.max(np.linalg.svd(G(1j * w_i))[1]) for w_i in w]
    singular_min_Gd = [np.min(np.linalg.svd(Gd(1j * w_i))[1]) for w_i in w]
    singular_max_Gd = [np.max(np.linalg.svd(Gd(1j * w_i))[1]) for w_i in w]
    condition_num_G = [
        np.max(np.linalg.svd(G(1j * w_i))[1]) /
        np.min(np.linalg.svd(G(1j * w_i))[1]) for w_i in w
    ]

    plt.figure(6)
    plt.subplot(311)
    plt.title('min_S(G(jw)) and max_S(G(jw))')
    plt.loglog(w, singular_min_G, 'b')
    plt.loglog(w, singular_max_G, 'r')

    plt.subplot(312)
    plt.title('Condition number of G')
    plt.loglog(w, condition_num_G)

    plt.subplot(313)
    plt.title('min_S(Gd(jw)) and max_S(Gd(jw))')
    plt.loglog(w, singular_min_Gd, 'b')
    plt.loglog(w, singular_max_Gd, 'r')
    plt.loglog([w[0], w[-1]], [1, 1])

    plt.show()

    return Ms_min
Code example #39
	def __init__(self, mol, mints):
		"""
		Initializes molecule, molecular properties, molecular integrals,
		before entering a loop to calculate the RHF energy 

		:param mol: molecule object, specifies geometry, charge, and multiplicity 
		:param mints: molecular integral helper, generates various molecular intergrals
		:param convCrit: convergence criterion; a value x corresponds to a maximum difference of 10^-x
		:param maxIter: maximum number of iterations to obtain self consistence
		"""
			
		self.mol = mol
		self.mints = mints
		self.convCrit = psi4.core.get_global_option('E_CONVERGENCE')
		self.maxIter = psi4.core.get_global_option('MAXITER')
		
		#Step 1: Read nuclear repulsion energy from molecule and atomic integrals from MintsHelper	
		self.VNuc = mol.nuclear_repulsion_energy() 		#nuclear repulsion energy
		self.S = np.array(mints.ao_overlap()) 			#overlap integrals
		self.T = np.array(mints.ao_kinetic()) 			#kinetic energy integrals
		self.V = np.array(mints.ao_potential()) 		#electron-nuclear attraction integrals
		self.g = np.array(mints.ao_eri()).transpose(0,2,1,3) 	#electron-electron repulsion integrals, transposed from (pq|rs) to <pr|qs>
		#The transpose is very important, because from here on forward physicist notation is assumed!
		
		#Step 1.5: Calculate Hamiltonian and orbital information 
		self.H = self.T + self.V 		#Hamiltonian
		self.E = 0.0 				#RHF energy
		self.norb = mints.basisset().nbf() 	#number of orbitals (defines size of arrays)
		self.nelec = - mol.molecular_charge() 	#number of electrons
		for atom in range(mol.natom()):
			self.nelec += mol.Z(atom)
		self.nocc = int(self.nelec / 2)		#number of occupied orbitals
					
		#Step 2: Form orthogonalizer (X = S^-1/2)
		self.X = np.matrix(spla.inv(spla.sqrtm(self.S)))	#Orthogonalizer S^-1/2
		
		#Step 3: Set D = 0 as "core" guess
		self.D = np.zeros((self.norb,self.norb))		#Density Matrix
		
		#Iteration to SC
		convCond = False 				#convergence condition 
		self.Eold = 1.0					#Previous calculated energy
		self.Dold = np.zeros((self.norb,self.norb))  	#Previous density matrix
		self.iter = 0					#Iteration count
		
		line = "+----+--------------------+------------+"
		print line
		print  "|iter| Energy             | dE         |"
		print line
		while not convCond:
			self.I2SC() #Steps 1-7
			#Check convergence, if the energy and density matrix are both within a threshold of one another, then it has converged
			#Additionally the program must iterate twice to avoid the condition when all four variables are initially null
			if (np.absolute((self.Eold - self.E)) < self.convCrit and np.absolute(spla.norm(self.D) - spla.norm(self.Dold)) < self.convCrit and self.iter > 1):
				print line
				print  "| Converged                            |"
				convCond = True
			elif self.iter >= self.maxIter:
				print line
				print  "| Failed to converge                   |"
				break
		print line
Code example #40
def simulate(copula, n):
    """
    Generates random variables with selected copula's structure.

    Parameters
    ----------
    copula : Copula
        The Copula to sample.
    n : integer
        The size of the sample.
    """
    d = copula.dimension()

    X = []
    if type(copula).__name__ == "Copula" and copula.name == "indep":
        for i in range(n):
            X.append([np.random.uniform() for j in range(d)])
    elif type(copula).__name__ == "Copula" and copula.name == "frechet_up":
        for i in range(n):
            Xi = np.random.uniform(size=d)
            Xi = np.full_like(Xi, Xi.min())
            X.append(Xi)
    elif type(copula).__name__ == "Copula" and copula.name == "frechet_down":
        if d != 2:
            raise ValueError(
                "Fréchet lower bound is not a copula for dimensions other than 2"
            )
        for i in range(n):
            Xi = np.random.uniform(size=2)
            Xi[1] = 1 - Xi[0]
            X.append(Xi)
    elif type(copula).__name__ == "GaussianCopula":
        # We get correlation matrix from covariance matrix
        Sigma = copula.get_corr()
        D = sqrtm(np.diag(np.diag(Sigma)))
        Dinv = inv(D)
        P = np.dot(np.dot(Dinv, Sigma), Dinv)
        A = cholesky(P)

        for i in range(n):
            Z = np.random.normal(size=d)
            V = np.dot(A, Z)
            U = stats.norm.cdf(V)
            X.append(U)
    elif type(copula).__name__ == "ArchimedeanCopula":
        U = np.random.rand(n, d)

        # Laplace–Stieltjes inverse transform
        LSinv = {
            'clayton':
            lambda theta: np.random.gamma(shape=1. / theta),
            'gumbel':
            lambda theta: stats.levy_stable.rvs(
                1. / theta, 1., 0,
                math.cos(math.pi / (2 * theta))**theta),
            'frank':
            lambda theta: stats.logser.rvs(1. - math.exp(-theta)),
            'amh':
            lambda theta: stats.geom.rvs(theta)
        }

        for i in range(n):
            V = LSinv[copula.getFamily()](copula.get_parameter())
            X_i = [copula.inverse_generator(-np.log(u) / V) for u in U[i, :]]
            X.append(X_i)
    elif type(copula).__name__ == "StudentCopula":
        nu = copula.get_df()
        Sigma = copula.get_corr()

        for i in range(n):
            Z = multivariate_normal.rvs(size=1, cov=Sigma)
            W = invgamma.rvs(nu / 2., size=1)
            U = np.sqrt(W) * Z
            X_i = [student.cdf(u, nu) for u in U]
            X.append(X_i)

    return X
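The GaussianCopula branch implements the standard recipe: normalise Sigma to a correlation matrix P, draw Z ~ N(0, I), correlate via a Cholesky factor, and map through the normal CDF. A standalone hedged sketch of just that branch, using numpy's lower-triangular Cholesky so that Cov(A @ Z) = P:

import numpy as np
from scipy import stats
from scipy.linalg import sqrtm, inv

Sigma = np.array([[1.0, 0.6], [0.6, 2.0]])  # illustrative covariance
D = sqrtm(np.diag(np.diag(Sigma)))          # diagonal matrix of standard deviations
P = inv(D) @ Sigma @ inv(D)                 # correlation matrix
A = np.linalg.cholesky(P)                   # lower-triangular, P = A @ A.T

Z = np.random.normal(size=2)
U = stats.norm.cdf(A @ Z)                   # one sample: uniform marginals, Gaussian dependence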
Code example #41
def qpfs_body(X,
              y,
              fn,
              alpha=None,
              r=None,
              sigma=None,
              solv='quadprog',
              metric_for_complex=lambda x: math.sqrt(x.imag**2 + x.real**2)):
    # TODO understand why complex double appears
    # TODO find suitable r parameter value
    # TODO find suitable sigma parameter value
    if r is None:
        r = X.shape[1] - 1
    if r >= X.shape[1]:
        raise TypeError(
            "r parameter should be less than the number of features")
    F = np.zeros(X.shape[1], dtype=np.double)  # F represents how strongly each variable is correlated with the class
    class_size = max(y) + 1  # Number of classes; labels are assumed to be integers from 1 to max(y)
    priors = np.histogram(y, bins=max(y))[0]  # Prior probabilities of the classes
    for i in range(1, class_size):  # Loop through classes
        Ck = np.where(y == i, 1, 0)  # Indicator array: 1 where y == i, 0 otherwise
        F += priors[i - 1] * fn(X, Ck)  # Accumulate the F vector
    Q = np.apply_along_axis(partial(fn, X), 0,
                            X).reshape(X.shape[1], X.shape[1])
    indices = np.random.randint(0, Q.shape[0], r)  # r random indices for the Nystrom approximation (random_integers is deprecated)
    A = Q[indices][:, :r]  # A matrix for Nystrom (real, shape [r, r])
    B = Q[indices][:, r:]  # B matrix for Nystrom (real, shape [r, M - r])
    if alpha is None:
        alpha = __countAlpha(A, B, F)  # Only in the filter method; a wrapper should adapt it based on performance
    AInvSqrt = sqrtm(np.linalg.pinv(A))  # Square root of the pseudo-inverse of A
    S = np.add(A, AInvSqrt.dot(B).dot(B.T).dot(AInvSqrt))  # Calculate S matrix
    eigvals, EVect = np.linalg.eig(S)  # eigenvalues and eigenvectors of S
    U = np.append(A, B.T, axis=0).dot(AInvSqrt).dot(EVect).dot(
        sqrtm(np.linalg.pinv(EVect)))  # Eigenvectors of Q using [A B]
    eigvalsFilt, UFilt = __filterBy(sigma, eigvals, U)  # Keep only eigenvalues above the threshold and their eigenvectors
    LFilt = np.zeros((len(eigvalsFilt), len(eigvalsFilt)), dtype=complex)  # Diagonal matrix of the retained eigenvalues
    for i in range(len(eigvalsFilt)):
        LFilt[i][i] = eigvalsFilt[i]
    UFilt = np.array([list(map(metric_for_complex, t)) for t in UFilt])
    LFilt = np.array([list(map(metric_for_complex, t)) for t in LFilt])
    yf = solve_qp((1 - alpha) * LFilt,
                  alpha * F.dot(UFilt),
                  UFilt,
                  np.zeros(UFilt.shape[0]),
                  solver=solv)  # perform qp on stated problem
    xSolution = UFilt.dot(yf)  # Find x - weights of features
    forRanks = list(zip(
        xSolution, F,
        [x for x in range(len(F))]))  # Zip into array of tuple for proper sort
    forRanks.sort(reverse=True)
    ranks = np.zeros(len(F))
    rankIndex = 1
    for i in forRanks:
        ranks[int(i[2])] = rankIndex
        rankIndex += 1
    return ranks
Code example #42
def calculate_frechet_distance(mu1, sigma1, mu2, sigma2, eps=1e-6):
    """Numpy implementation of the Frechet Distance.
    The Frechet distance between two multivariate Gaussians X_1 ~ N(mu_1, C_1)
    and X_2 ~ N(mu_2, C_2) is
            d^2 = ||mu_1 - mu_2||^2 + Tr(C_1 + C_2 - 2*sqrt(C_1*C_2)).

    Stable version by Dougal J. Sutherland.

    Params:
    -- mu1   : Numpy array containing the activations of a layer of the
               inception net (like returned by the function 'get_predictions')
               for generated samples.
    -- mu2   : The sample mean over activations, precalculated on a
               representative data set.
    -- sigma1: The covariance matrix over activations for generated samples.
    -- sigma2: The covariance matrix over activations, precalculated on a
               representative data set.

    Returns:
    --   : The Frechet Distance.
    """

    mu1 = np.atleast_1d(mu1)
    mu2 = np.atleast_1d(mu2)

    sigma1 = np.atleast_2d(sigma1)
    sigma2 = np.atleast_2d(sigma2)

    assert mu1.shape == mu2.shape, \
        'Training and test mean vectors have different lengths'
    assert sigma1.shape == sigma2.shape, \
        'Training and test covariances have different dimensions'

    diff = mu1 - mu2

    # Product might be almost singular
    t = sigma1.dot(sigma2)
    for i in range(30):
        # print(i)
        flag = True
        covmean, _ = linalg.sqrtm(t, disp=False)
        if not np.isfinite(covmean).all():
            msg = ('fid calculation produces singular product; '
                   'adding %s to diagonal of cov estimates') % eps
            print(msg)
            offset = np.eye(sigma1.shape[0]) * eps
            covmean = linalg.sqrtm((sigma1 + offset).dot(sigma2 + offset))

        # Numerical error might give slight imaginary component
        if np.iscomplexobj(covmean):
            if not np.allclose(np.diagonal(covmean).imag, 0, atol=1e-3):
                # raise ValueError('Imaginary component {}'.format(m))
                flag = False
            covmean = covmean.real
        if flag:
            break
    if not flag:
        print('Warning: the fid may be incorrect!')
    tr_covmean = np.trace(covmean)

    return (diff.dot(diff) + np.trace(sigma1) + np.trace(sigma2) - 2 * tr_covmean)
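A quick hedged sanity check of the function above (illustrative random features, assuming numpy as np is imported as in the snippet): identical statistics should give a distance near zero.

rng = np.random.RandomState(0)
act = rng.randn(500, 8)
mu, sigma = act.mean(axis=0), np.cov(act, rowvar=False)
print(calculate_frechet_distance(mu, sigma, mu, sigma))  # ~0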
Code example #43
File: dof10.py  Project: jrooney4450/crazyflie
# NOTE: fragment from a larger script; it assumes numpy as np and
# scipy.linalg as lin are imported, and that n (number of masses) and
# sze (number of time samples) are defined earlier in the source file.
hi = 20
t = np.linspace(0, hi, sze)
print(t)
P = np.zeros([n, sze], float)
x = np.zeros([n, sze], float)

k = 1e2
print(k)
m = 2
x0 = np.zeros([n, 1], float)
x0[0] = 3
print(x0)
v0 = np.zeros([n, 1])

M = m * np.eye(n)
L = lin.sqrtm(M)
print(M)
print(L)
K = 2 * k * np.eye(n)
K[n - 1][n - 1] = k

for i in range(0, n - 1):
    K[i + 1][i] = -1 * k
    K[i][i + 1] = -1 * k

print(K)
Linv = lin.inv(L)
Ktilde = Linv @ K @ Linv
print(Ktilde)
D, V = lin.eig(Ktilde)
print(D)
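Since Ktilde = L^-1 K L^-1 is symmetric, its eigenvalues are the squared natural frequencies of the mass-spring chain. A short hedged continuation of the fragment above:

omega = np.sqrt(np.real(D))  # natural frequencies in rad/s
modes = Linv @ V             # mode shapes mapped back to physical coordinates
print(np.sort(omega))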
Code example #44
    def init_params(self, ini):
        self.field_names = getattr(self, 'field_names', ['T', 'E', 'B', 'P'])
        self.tot_theory_fields = len(self.field_names)
        self.map_names = ini.split('map_names', default=[])
        self.has_map_names = bool(self.map_names)
        if self.has_map_names:
            # e.g. have multiple frequencies for given field measurement
            map_fields = ini.split('map_fields')
            if len(map_fields) != len(self.map_names):
                raise LoggedError(self.log,
                                  'number of map_fields does not match map_names')
            self.map_fields = [self.typeIndex(f) for f in map_fields]
        else:
            self.map_names = self.field_names
            self.map_fields = np.arange(len(self.map_names), dtype=int)
        fields_use = ini.split('fields_use', [])
        if len(fields_use):
            index_use = [self.typeIndex(f) for f in fields_use]
            use_theory_field = [i in index_use for i in range(self.tot_theory_fields)]
        else:
            if not self.has_map_names:
                raise LoggedError(self.log, 'must have fields_use or map_names')
            use_theory_field = [True] * self.tot_theory_fields
        maps_use = ini.split('maps_use', [])
        if len(maps_use):
            if any(not i for i in use_theory_field):
                self.log.warning('maps_use overrides fields_use')
            self.use_map = [False] * len(self.map_names)
            for j, map_used in enumerate(maps_use):
                if map_used in self.map_names:
                    self.use_map[self.map_names.index(map_used)] = True
                else:
                    raise LoggedError(self.log, 'maps_use item not found - %s' % map_used)
        else:
            self.use_map = [use_theory_field[self.map_fields[i]]
                            for i in range(len(self.map_names))]
        # Bandpowers can depend on more fields than are actually used in likelihood
        # e.g. for correcting leakage or other linear corrections
        self.require_map = self.use_map[:]
        if self.has_map_names:
            if ini.hasKey('fields_required'):
                raise LoggedError(self.log, 'use maps_required not fields_required')
            maps_use = ini.split('maps_required', [])
        else:
            maps_use = ini.split('fields_required', [])
        if len(maps_use):
            for j, map_used in enumerate(maps_use):
                if map_used in self.map_names:
                    self.require_map[self.map_names.index(map_used)] = True
                else:
                    raise LoggedError(self.log, 'required item not found %s' % map_used)
        self.required_theory_field = [False for _ in self.field_names]
        for i in range(len(self.map_names)):
            if self.require_map[i]:
                self.required_theory_field[self.map_fields[i]] = True
        self.ncl_used = 0  # set later reading covmat
        self.like_approx = ini.string('like_approx', 'gaussian')
        self.nmaps = np.count_nonzero(self.use_map)
        self.nmaps_required = np.count_nonzero(self.require_map)
        self.required_order = np.zeros(self.nmaps_required, dtype=int)
        self.map_required_index = -np.ones(len(self.map_names), dtype=int)
        ix = 0
        for i in range(len(self.map_names)):
            if self.require_map[i]:
                self.map_required_index[i] = ix
                self.required_order[ix] = i
                ix += 1
        self.map_used_index = -np.ones(len(self.map_names), dtype=int)
        ix = 0
        self.used_map_order = []
        for i, map_name in enumerate(self.map_names):
            if self.use_map[i]:
                self.map_used_index[i] = ix
                self.used_map_order.append(map_name)
                ix += 1
        self.ncl = (self.nmaps * (self.nmaps + 1)) // 2
        self.pcl_lmax = ini.int('cl_lmax')
        self.pcl_lmin = ini.int('cl_lmin')
        self.binned = ini.bool('binned', True)
        if self.binned:
            self.nbins = ini.int('nbins')
            self.bin_min = ini.int('use_min', 1) - 1
            self.bin_max = ini.int('use_max', self.nbins) - 1
            # needed by read_bin_windows:
            self.nbins_used = self.bin_max - self.bin_min + 1
            self.bins = self.read_bin_windows(ini, 'bin_window')
        else:
            if self.nmaps != self.nmaps_required:
                raise LoggedError(
                    self.log, 'unbinned likelihood must have nmaps==nmaps_required')
            self.nbins = self.pcl_lmax - self.pcl_lmin + 1
            if self.like_approx != 'exact':
                self.log.warning('Unbinned likelihoods untested in this version')
            self.bin_min = ini.int('use_min', self.pcl_lmin)
            self.bin_max = ini.int('use_max', self.pcl_lmax)
            self.nbins_used = self.bin_max - self.bin_min + 1
        self.full_bandpower_headers, self.full_bandpowers, self.bandpowers = \
            self.read_cl_array(ini, 'cl_hat', return_full=True)
        if self.like_approx == 'HL':
            self.cl_fiducial = self.read_cl_array(ini, 'cl_fiducial')
        else:
            self.cl_fiducial = None
        includes_noise = ini.bool('cl_hat_includes_noise', False)
        self.cl_noise = None
        if self.like_approx != 'gaussian' or includes_noise:
            self.cl_noise = self.read_cl_array(ini, 'cl_noise')
            if not includes_noise:
                self.bandpowers += self.cl_noise
            elif self.like_approx == 'gaussian':
                self.bandpowers -= self.cl_noise
        self.cl_lmax = np.zeros((self.tot_theory_fields, self.tot_theory_fields))
        for i in range(self.tot_theory_fields):
            if self.required_theory_field[i]:
                self.cl_lmax[i, i] = self.pcl_lmax
        if self.required_theory_field[0] and self.required_theory_field[1]:
            self.cl_lmax[1, 0] = self.pcl_lmax

        if self.like_approx != 'gaussian':
            cl_fiducial_includes_noise = ini.bool('cl_fiducial_includes_noise', False)
        else:
            cl_fiducial_includes_noise = False
        self.bandpower_matrix = np.zeros((self.nbins_used, self.nmaps, self.nmaps))
        self.noise_matrix = self.bandpower_matrix.copy()
        self.fiducial_sqrt_matrix = self.bandpower_matrix.copy()
        if self.cl_fiducial is not None and not cl_fiducial_includes_noise:
            self.cl_fiducial += self.cl_noise
        for b in range(self.nbins_used):
            self.elements_to_matrix(self.bandpowers[:, b], self.bandpower_matrix[b, :, :])
            if self.cl_noise is not None:
                self.elements_to_matrix(self.cl_noise[:, b], self.noise_matrix[b, :, :])
            if self.cl_fiducial is not None:
                self.elements_to_matrix(self.cl_fiducial[:, b],
                                        self.fiducial_sqrt_matrix[b, :, :])
                self.fiducial_sqrt_matrix[b, :, :] = (
                    sqrtm(self.fiducial_sqrt_matrix[b, :, :]))
        if self.like_approx == 'exact':
            self.fsky = ini.float('fullsky_exact_fksy')
        else:
            self.cov = self.ReadCovmat(ini)
            self.covinv = np.linalg.inv(self.cov)
        if 'linear_correction_fiducial_file' in ini.params:
            self.fid_correction = self.read_cl_array(ini, 'linear_correction_fiducial')
            self.linear_correction = self.read_bin_windows(ini,
                                                           'linear_correction_bin_window')
        else:
            self.linear_correction = None
        if ini.hasKey('nuisance_params'):
            s = ini.relativeFileName('nuisance_params')
            self.nuisance_params = ParamNames(s)
            if ini.hasKey('calibration_param'):
                raise Exception('calibration_param not allowed with nuisance_params')
            if ini.hasKey('calibration_paramname'):
                self.calibration_param = ini.string('calibration_paramname')
            else:
                self.calibration_param = None
        elif ini.string('calibration_param', ''):
            s = ini.relativeFileName('calibration_param')
            if '.paramnames' not in s:
                raise LoggedError(
                    self.log, 'calibration_param must be paramnames file unless '
                              'nuisance_params also specified')
            self.nuisance_params = ParamNames(s)
            self.calibration_param = self.nuisance_params.list()[0]
        else:
            self.calibration_param = None
        if ini.hasKey('log_calibration_prior'):
            self.log.warning('log_calibration_prior in .dataset ignored, '
                             'set separately in .yaml file')
        self.aberration_coeff = ini.float('aberration_coeff', 0.0)

        self.map_cls = self.init_map_cls(self.nmaps_required, self.required_order)
Code example #45
def mvnrnd(xbar, Γ, n):
    # Draw n samples from N(xbar, Γ); take the dimension from xbar
    # instead of hard-coding it to 2
    X = randn(len(xbar), n)
    X = (xbar @ ones((1, n))) + sqrtm(Γ) @ X
    return X
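A quick hedged usage sketch (assuming, as in the source project, a star import of numpy plus scipy.linalg's sqrtm):

xbar = array([[1.0], [2.0]])
Γ = array([[2.0, 0.3], [0.3, 1.0]])
samples = mvnrnd(xbar, Γ, 1000)
print(samples.shape)  # (2, 1000)
print(cov(samples))   # should approach Γ for large n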
Code example #46
File: random_objects.py  Project: zuoyanizz/qutip
def rand_super_bcsz(N=2, enforce_tp=True, rank=None, dims=None, seed=None):
    """
    Returns a random superoperator drawn from the Bruzda
    et al ensemble for CPTP maps [BCSZ08]_. Note that due to
    finite numerical precision, for ranks less than full-rank,
    zero eigenvalues may become slightly negative, such that the
    returned operator is not actually completely positive.


    Parameters
    ----------
    N : int
        Square root of the dimension of the superoperator to be returned.
    enforce_tp : bool
        If True, the trace-preserving condition of [BCSZ08]_ is enforced;
        otherwise only complete positivity is enforced.
    rank : int or None
        Rank of the sampled superoperator. If None, a full-rank
        superoperator is generated.
    dims : list
        Dimensions of quantum object.  Used for specifying
        tensor structure. Default is dims=[[[N],[N]], [[N],[N]]].

    Returns
    -------
    rho : Qobj
        A superoperator acting on vectorized dim × dim density operators,
        sampled from the BCSZ distribution.
    """
    if dims is not None:
        # TODO: check!
        pass
    else:
        dims = [[[N], [N]], [[N], [N]]]

    if rank is None:
        rank = N**2
    if rank > N**2:
        raise ValueError("Rank cannot exceed superoperator dimension.")

    # We use mainly dense matrices here for speed in low
    # dimensions. In the future, it would likely be better to switch off
    # between sparse and dense matrices as the dimension grows.

    # We start with a Ginibre uniform matrix X of the appropriate rank,
    # and use it to construct a positive semidefinite matrix X X⁺.
    X = randnz((N**2, rank), norm='ginibre', seed=seed)

    # Precompute X X⁺, as we'll need it in two different places.
    XXdag = np.dot(X, X.T.conj())

    if enforce_tp:
        # We do the partial trace over the first index by using dense reshape
        # operations, so that we can avoid bouncing to a sparse representation
        # and back.
        Y = np.einsum('ijik->jk', XXdag.reshape((N, N, N, N)))

        # Now we have the matrix 𝟙 ⊗ Y^{-1/2}, which we can find by doing
        # the square root and the inverse separately. As a possible improvement,
        # iterative methods exist to find inverse square root matrices directly,
        # as this is important in statistics.
        Z = np.kron(np.eye(N), sqrtm(la.inv(Y)))

        # Finally, we dot everything together and pack it into a Qobj,
        # marking the dimensions as that of a type=super (that is,
        # with left and right compound indices, each representing
        # left and right indices on the underlying Hilbert space).
        D = Qobj(np.dot(Z, np.dot(XXdag, Z)))
    else:
        D = N * Qobj(XXdag / np.trace(XXdag))

    D.dims = [
        # Left dims
        [[N], [N]],
        # Right dims
        [[N], [N]]
    ]

    # Since [BCSZ08] gives a row-stacking Choi matrix, but QuTiP
    # expects a column-stacking Choi matrix, we must permute the indices.
    D = D.permute([[1], [0]])

    D.dims = dims

    # Mark that we've made a Choi matrix.
    D.superrep = 'choi'

    return sr.to_super(D)
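A minimal hedged usage sketch, assuming the function above is in scope with its qutip dependencies (iscptp is a standard Qobj property):

S = rand_super_bcsz(N=2, seed=7)
print(S.superrep)  # 'super' after the to_super conversion
print(S.iscptp)    # expected True: the sampled map is CPTP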
Code example #47
    def fit(self, data):
        """
        Fit independent components using an iterative fixed-point algorithm

        Parameters
        ----------
        data: RDD of (tuple, array) pairs, or RowMatrix
            Data to estimate independent components from

        Returns
        ----------
        self : returns an instance of self.
        """

        d = len(data.first()[1])

        if self.k is None:
            self.k = d

        if self.c > self.k:
            raise Exception("number of independent comps " + str(self.c) +
                            " must be less than the number of principal comps " + str(self.k))

        if self.k > d:
            raise Exception("number of principal comps " + str(self.k) +
                            " must be less than the data dimensionality " + str(d))

        if type(data) is not RowMatrix:
            data = RowMatrix(data)

        # reduce dimensionality
        svd = SVD(k=self.k, method=self.svdmethod).calc(data)

        # whiten data
        whtmat = real(dot(inv(diag(svd.s/sqrt(data.nrows))), svd.v))
        unwhtmat = real(dot(transpose(svd.v), diag(svd.s/sqrt(data.nrows))))
        wht = data.times(whtmat.T)

        # do multiple independent component extraction
        if self.seed != 0:
            random.seed(self.seed)
        b = orth(random.randn(self.k, self.c))
        b_old = zeros((self.k, self.c))
        iter = 0
        minabscos = 0
        errvec = zeros(self.maxiter)

        while (iter < self.maxiter) & ((1 - minabscos) > self.tol):
            iter += 1
            # update rule for pow3 non-linearity (TODO: add others)
            b = wht.rows().map(lambda x: outer(x, dot(x, b) ** 3)).sum() / wht.nrows - 3 * b
            # make orthogonal
            b = dot(b, real(sqrtm(inv(dot(transpose(b), b)))))
            # evaluate error
            minabscos = min(abs(diag(dot(transpose(b), b_old))))
            # store results
            b_old = b
            errvec[iter-1] = (1 - minabscos)

        # get un-mixing matrix
        w = dot(b.T, whtmat)

        # get mixing matrix
        a = dot(unwhtmat, b)

        # get components
        sigs = data.times(w.T).rdd

        self.w = w
        self.a = a
        self.sigs = sigs

        return self
Code example #48
def Frechet_Inception_Distance(orig_images,
                               gen_images,
                               model_predictor,
                               device=torch.device("cuda:0" if (
                                   torch.cuda.is_available()) else "cpu"),
                               bs=100):
    """
    Calculates FID between original and generated images

    Parameters
    ----------
    orig_images : Torch Dataset
        Dataset that returns original images
    gen_images : Torch Dataset
        Dataset that returns generated images
    model_predictor : PyTorch model
        Model that extracts a feature vector from each image for the FID statistics
    bs : int
        Batch size
    Returns
    -------
    FID : float
    """
    model_predictor.eval()

    orig_loader = DataLoader(orig_images, batch_size=bs, shuffle=False)

    orig_predictions = []
    for i, data in enumerate(orig_loader):
        data = data.to(device)
        with torch.no_grad():
            logits = model_predictor(data)
            p_yx = F.softmax(logits, dim=1).cpu().numpy()
        orig_predictions.append(p_yx)

    orig_predictions = np.vstack(orig_predictions)

    gen_loader = DataLoader(gen_images, batch_size=bs, shuffle=False)

    gen_predictions = []
    for i, data in enumerate(gen_loader):
        data = data.to(device)
        with torch.no_grad():
            logits = model_predictor(data)
            p_yx = F.softmax(logits, dim=1).cpu().numpy()
        gen_predictions.append(p_yx)

    gen_predictions = np.vstack(gen_predictions)

    orig_mean = orig_predictions.mean(axis=0)
    gen_mean = gen_predictions.mean(axis=0)

    orig_cov = np.cov(orig_predictions, rowvar=False)
    gen_cov = np.cov(gen_predictions, rowvar=False)

    mean_dist = ((orig_mean - gen_mean)**2).sum()
    cov_dist = np.trace(orig_cov + gen_cov - 2 * sqrtm(orig_cov @ gen_cov))
    if np.iscomplexobj(cov_dist):
        cov_dist = cov_dist.real

    return mean_dist + cov_dist
Code example #49
    def _posterior_covariance(self, hypothesis):
        """
        Return the posterior covariance for a given hypothesis. Hypothesis contains the predicted
        state covariance in square root form, the measurement prediction (which in turn contains
        the measurement cross covariance, :math:`P_{k|k-1} H_k^T`, and the innovation covariance,
        :math:`S = H_k P_{k|k-1} H_k^T + R`, not in square root form). The hypothesis or the
        updater contain the measurement noise matrix. The :attr:`sqrt_measurement_noise` flag
        indicates whether we should use the square root form of this matrix (True) or its full
        form (False).

        Parameters
        ----------
        hypothesis: :class:`~.Hypothesis`
            A hypothesised association between state prediction and measurement

        Method
        ------
        If the :attr:`qr_method` flag is set to True then the update proceeds via a QR
        decomposition which requires only one further matrix inversion (see [1]), rather than
        three plus a Cholesky factorisation, for the method set out in [2].

        Returns
        -------
        : numpy.array
            The posterior covariance matrix rendered via the Kalman update process in
            lower-triangular form.
        : numpy.array
            The Kalman gain, :math:`K = P_{k|k-1} H_k^T S^{-1}`

        """
        # Do we already have a measurement model?
        measurement_model = \
            self._check_measurement_model(hypothesis.measurement.measurement_model)
        # Square root of the noise covariance, account for the fact that it may be supplied in one
        # of two ways
        try:
            sqrt_noise_cov = measurement_model.sqrt_covar
        except AttributeError:
            sqrt_noise_cov = la.sqrtm(measurement_model.covar())

        if self.qr_method:
            # The prior and noise covariances and the measurement matrix
            sqrt_prior_cov = hypothesis.prediction.sqrt_covar
            bigh = measurement_model.matrix()

            # Set up and execute the QR decomposition
            measdim = measurement_model.ndim_meas
            zeros = np.zeros((measurement_model.ndim_state, measdim))
            biga = np.block([[sqrt_noise_cov, bigh @ sqrt_prior_cov],
                             [zeros, sqrt_prior_cov]])
            _, upper = np.linalg.qr(biga.T)

            # Extract meaningful quantities
            atheta = upper.T
            sqrt_innov_cov = atheta[:measdim, :measdim]
            kalman_gain = atheta[measdim:, :measdim] @ (
                np.linalg.inv(sqrt_innov_cov))
            post_cov = atheta[measdim:, measdim:]
        else:
            # Kalman gain
            kalman_gain = \
                hypothesis.prediction.sqrt_covar @ \
                hypothesis.measurement_prediction.cross_covar @ \
                np.linalg.inv(hypothesis.measurement_prediction.covar)
            # Square root of the innovation covariance
            sqrt_innov_cov = la.sqrtm(hypothesis.measurement_prediction.covar)
            # Posterior covariance
            post_cov = hypothesis.prediction.sqrt_covar @ \
                (np.identity(hypothesis.prediction.ndim) -
                 hypothesis.measurement_prediction.cross_covar @ np.linalg.inv(sqrt_innov_cov.T) @
                 np.linalg.inv(sqrt_innov_cov + sqrt_noise_cov) @
                 hypothesis.measurement_prediction.cross_covar.T)

        return post_cov, kalman_gain
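For orientation, the qr_method branch above relies on the standard square-root update identity; a sketch in the docstring's notation (background math, not library code):

A = \begin{pmatrix} R^{1/2} & H_k P_{k|k-1}^{1/2} \\ 0 & P_{k|k-1}^{1/2} \end{pmatrix},
\qquad A^T = Q U \;\Rightarrow\;
U^T = \begin{pmatrix} S^{1/2} & 0 \\ P_{k|k-1} H_k^T S^{-T/2} & P_{k|k}^{1/2} \end{pmatrix},

since U^T U = A A^T. The code reads off sqrt_innov_cov = S^{1/2}, the gain K = (P_{k|k-1} H_k^T S^{-T/2}) S^{-1/2} = P_{k|k-1} H_k^T S^{-1}, and post_cov = P_{k|k}^{1/2}.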
Code example #50
# PRECONDITIONING {{{1

precond_vec = np.zeros(m.ip.d)
precond_mat = np.eye(m.ip.d)

preconditioning = True
if preconditioning:

    precond_vec_file = "data_julia/precond_vec.txt"
    precond_mat_file = "data_julia/precond_mat.txt"

    if os.path.exists(precond_vec_file):
        precond_vec = np.loadtxt(precond_vec_file)
        precond_mat = np.loadtxt(precond_mat_file)

precond_mat = la.sqrtm(precond_mat)

# MULTISCALE METHOD {{{1
# Test MD solver
solver_md = solvers.MdSolver(
    J=8,
    delta=1e-5,
    sigma=1e-5,
    dt=(1 if preconditioning else 1/m.k**4),
    reg=False,
    noise=False,
    parallel=True,
    adaptive=False,
    dt_min=1e-7,
    dt_max=.1,
    precond_vec=precond_vec,
Code example #51
    for v in range(CHAINLENGTH, 1, -1):
        OPP = I_C
        NEWSYSTEM2 = NEW2
        for w in range(v - 2):
            OPP = np.kron(OPP, I_C)
        OPP1 = np.kron(OPP, ket1)
        OPP2 = np.kron(OPP, ket2)
        OPP3 = np.kron(OPP, np.matrix.getH(ket1))
        OPP4 = np.kron(OPP, np.matrix.getH(ket2))
        dim = (np.power(2, v - 1), np.power(2, v - 1))
        NEW2 = np.zeros(dim)
        NEW2 = OPP3 * NEWSYSTEM2 * OPP1 + OPP4 * NEWSYSTEM2 * OPP2
    x = NEW1
    y = NEW2
    #    y = INITIALCHAIN
    z = linalg.sqrtm(np.matrix.getH(x - y) * (x - y))
    results.append(0.5 * np.absolute(np.trace(z)))

    SYSTEM = np.matrix(EXP1) * np.matrix(SYSTEM) * np.matrix(EXP2)
    SYSTEM2 = np.matrix(EXP1) * np.matrix(SYSTEM2) * np.matrix(EXP2)
###################################PLOT OPTIONS

font = {'family': 'sans-serif', 'weight': 'light', 'size': 12}
ax = plt.subplot(111)
ax.spines["right"].set_visible(False)
ax.spines["top"].set_visible(False)
plt.tick_params(axis="both",
                which="both",
                bottom="on",
                top="off",
                labelbottom="on",
Code example #52
 def get_correlated_weights(self, nthreads=0):
     if self._correlated_weights is None:
         self._correlated_weights = splinalg.sqrtm(self.weights)
     return self._correlated_weights
Code example #53
def wass_dist(A, B):
    # Squared 2-Wasserstein (Bures) distance between zero-mean Gaussians with
    # covariances A and B: Tr(A) + Tr(B) - 2 Tr((A^{1/2} B A^{1/2})^{1/2})
    Root_1 = slg.sqrtm(A)
    return np.trace(A) + np.trace(B) - 2 * np.trace(slg.sqrtm(Root_1 @ B @ Root_1))
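A quick hedged check (using the same np/slg aliases as above): the distance from a covariance matrix to itself should vanish.

A = np.array([[2.0, 0.5], [0.5, 1.0]])
print(wass_dist(A, A))          # ~0 up to numerical error
print(wass_dist(A, np.eye(2)))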
Code example #54
            if l < 0.01:
                opt = torch.optim.SGD([P_sqrt], lr=.1)

        return losses


P_lqr = lqr(A, B, np.eye(N), np.eye(M), W)

LQR = COCP(A, B, W, np.eye(N), np.eye(M), 100, 6)

R = np.eye(M)
Q = np.eye(N)

import matplotlib
import matplotlib.pyplot as plt

Kt = np.linalg.solve(R + B.T @ P_lqr @ B, -B.T @ P_lqr @ A)
loss_lqr = LQR.loss(torch.from_numpy(sqrtm(P_lqr)), seed=0).item()
print("Loss LQR : ", loss_lqr)

losses = LQR.optimize()

plt.semilogy(losses, color='k', label='COCP')
plt.gca().yaxis.set_minor_formatter(matplotlib.ticker.ScalarFormatter())
plt.axhline(loss_lqr, linestyle='--', color='k', label='LQR')
plt.ylabel("cost")
plt.xlabel("Iterations")
plt.subplots_adjust(left=.15, bottom=.2)
plt.savefig("lqr.pdf")
plt.show()
Code example #55
 def test_gh5336(self):
     M = np.diag([2, 1, 0])
     R = np.diag([sqrt(2), 1, 0])
     assert_allclose(np.dot(R, R), M, atol=1e-14)
     assert_allclose(sqrtm(M), R, atol=1e-14)
Code example #56
def weighted_constrained_least_squares(A,
                                       b,
                                       Cov_b=None,
                                       equality_constraints=None,
                                       inequality_constraints=None):
    """
    Solves a weighted, constrained least squares problem using cvxpy.
    The objective function is to minimize the following:
    sum_squares(Cov_b^(-1/2).A.x - Cov_b^(-1/2).b))
    subject to
    C.x == c
    D.x <= d

    Parameters
    ----------
    A : 2D numpy array
        The matrix A in the objective function above.

    b : numpy array
        The vector b in the objective function above.

    Cov_b : 2D numpy array
        The covariance matrix associated with b

    equality_constraints : list containing a 2D array and 1D array
        The list contains the matrices C and c in the objective function above.

    inequality_constraints : list containing a 2D array and 1D array
        The list contains the matrices D and d in the objective function above.


    Returns
    -------
    popt : numpy array
        Optimized phase amounts.

    pcov : 2D numpy array
        Covariance matrix corresponding to the optimized phase amounts.

    res : float
        The weighted residual of the fitting procedure.
    """

    if Cov_b is None:
        Cov_b = np.eye(len(b))

    # Create the standard weighted least squares objective function
    # (https://stats.stackexchange.com/a/333551)
    n_vars = A.shape[1]
    m = inv(sqrtm(Cov_b))
    mA = m @ A
    mb = m @ b
    x = cp.Variable(n_vars)
    objective = cp.Minimize(cp.sum_squares(mA @ x - mb))

    constraints = []
    if equality_constraints is not None:
        n_eq_csts = len(equality_constraints[0])
        constraints = [
            equality_constraints[0][i] @ x == equality_constraints[1][i]
            for i in range(n_eq_csts)
        ]

    if inequality_constraints is not None:
        n_ineq_csts = len(inequality_constraints[0])
        constraints.extend([
            inequality_constraints[0][i] @ x <= inequality_constraints[1][i]
            for i in range(n_ineq_csts)
        ])

    # Set up the problem and solve it
    warns = []
    if len(constraints) > 1:
        prob = cp.Problem(objective, constraints)
    else:
        prob = cp.Problem(objective)

    try:
        with warnings.catch_warnings(record=True) as w:
            res = prob.solve(solver=cp.ECOS)
            popt = np.array([x.value[i] for i in range(len(A.T))])
            warns.extend(w)
    except Exception:
        print('ECOS Solver failed. Trying default solver.')
        try:
            with warnings.catch_warnings(record=True) as w:
                res = prob.solve()
                popt = np.array([x.value[i] for i in range(len(A.T))])
                warns.extend(w)
        except Exception as e:
            raise Exception(e)

    # Calculate the covariance matrix
    # (also from https://stats.stackexchange.com/a/333551)
    inv_Cov_b = np.linalg.inv(Cov_b)
    pcov = np.linalg.inv(A.T.dot(inv_Cov_b.dot(A)))

    return (popt, pcov, res)
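A minimal hedged usage sketch (assuming numpy and cvxpy as imported above; the constraint matrices here are illustrative only):

A = np.array([[1.0, 0.0], [1.0, 1.0], [0.0, 1.0]])
b = np.array([1.0, 2.0, 1.1])
eq = [np.array([[1.0, 1.0]]), np.array([2.0])]                    # x0 + x1 == 2
ineq = [np.array([[-1.0, 0.0], [0.0, -1.0]]), np.array([0.0, 0.0])]  # x >= 0
popt, pcov, res = weighted_constrained_least_squares(
    A, b, equality_constraints=eq, inequality_constraints=ineq)
print(popt, res)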
Code example #57
    def test_disp(self):
        np.random.seed(1234)

        A = np.random.rand(3, 3)
        B = sqrtm(A, disp=True)
        assert_allclose(B.dot(B), A)
Code example #58
A = np.kron(INITIALCOIN, INITIALCHAIN)
for x in range(CHAINLENGTH - 2):
    A = np.kron(A, INITIALCHAIN)
SYSTEM = A
########################################
#QUANTUM WALK
ket1 = np.matrix([[1], [0]])
ket2 = np.matrix([[0], [1]])
for r in range(STEPS):
    NEW1 = SYSTEM
    for x in range(CHAINLENGTH, 1, -1):
        OPP = I_C
        NEWSYSTEM = NEW1
        for y in range(x - 2):
            OPP = np.kron(OPP, I_C)
        OPP1 = np.kron(OPP, ket1)
        OPP2 = np.kron(OPP, ket2)
        OPP3 = np.kron(OPP, np.matrix.getH(ket1))
        OPP4 = np.kron(OPP, np.matrix.getH(ket2))
        dim = (np.power(2, x - 1), np.power(2, x - 1))
        NEW1 = np.zeros(dim)
        NEW1 = OPP3 * NEWSYSTEM * OPP1 + OPP4 * NEWSYSTEM * OPP2
    x = NEW1
    y = INITIALCHAIN
    # Trace distance 0.5*Tr|x - y| needs the conjugate transpose, not the
    # elementwise conjugate (cf. the getH version in the earlier snippet)
    z = linalg.sqrtm(np.matrix.getH(x - y) * (x - y))
    results.append(0.5 * np.absolute(np.trace(z)))

    SYSTEM = np.matrix(EXP1) * np.matrix(SYSTEM) * np.matrix(EXP2)

plt.plot(results)
Code example #59
np.random.seed(8)

n = 5  # Dimension
N = 100  # Number of sample
N_test = 10  # Samples for test set

X = np.random.randn(n, N)
Y = np.random.randn(n, N)

X_test = np.random.randn(n, N_test)
Y_test = np.random.randn(n, N_test)

P = np.random.randn(n, n)
P = P.dot(P.T) + np.identity(n)
sqrtP = la.sqrtm(P)

d = np.linalg.norm(sqrtP.dot(X - Y), axis=0)
d = np.maximum(d + np.random.randn(N), 0)
d_test = np.linalg.norm(sqrtP.dot(X_test - Y_test), axis=0)
d_test = np.maximum(d_test + np.random.randn(N_test), 0)

ones = np.ones((1, N))
P = Semidef(n)
d_sq = (X - Y).T * P * (X - Y)
d_diag = diag(d_sq)
obj = Minimize(1.0 / N * (sum_entries(square(d - d_diag))))
cons = []
prob = Problem(obj, cons)
prob.solve()
print "Optimal value for d:", prob.value
Code example #60
def sym(w):
    # Symmetric orthogonalisation: W (W^T W)^(-1/2), the closest matrix to w
    # with orthonormal columns (the polar factor), as used in ICA decorrelation
    return w.dot(spl.inv(spl.sqrtm(w.T.dot(w))))
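A quick hedged check (assuming import numpy as np and import scipy.linalg as spl, matching the alias above):

np.random.seed(0)
w = np.random.randn(4, 4)
ws = sym(w)
print(np.allclose(ws.T @ ws, np.eye(4)))  # True: sym(w) has orthonormal columns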