Example #1
File: mcgsm_test.py Project: jakirkham/cmt
	def test_mogsm(self):
		mcgsm = MCGSM(
			dim_in=0,
			dim_out=3,
			num_components=2,
			num_scales=2,
			num_features=0)

		p0 = 0.3
		p1 = 0.7
		N = 20000
		m0 = array([[2], [0], [0]])
		m1 = array([[0], [2], [1]])
		C0 = cov(randn(mcgsm.dim_out, mcgsm.dim_out**2))
		C1 = cov(randn(mcgsm.dim_out, mcgsm.dim_out**2))
		input = zeros([0, N])
		output = hstack([
			dot(cholesky(C0), randn(mcgsm.dim_out, round(p0 * N))) + m0,
			dot(cholesky(C1), randn(mcgsm.dim_out, round(p1 * N))) + m1]) * (rand(1, N) + 0.5)

		mcgsm.train(input, output, parameters={
			'verbosity': 0,
			'max_iter': 10,
			'train_means': True})

		mogsm = MoGSM(3, 2, 2)

		# translate parameters from MCGSM to MoGSM
		mogsm.priors = sum(exp(mcgsm.priors), 1) / sum(exp(mcgsm.priors))

		for k in range(mogsm.num_components):
			mogsm[k].mean = mcgsm.means[:, k]
			mogsm[k].covariance = inv(dot(mcgsm.cholesky_factors[k], mcgsm.cholesky_factors[k].T))
			mogsm[k].scales = exp(mcgsm.scales[k, :])
			mogsm[k].priors = exp(mcgsm.priors[k, :]) / sum(exp(mcgsm.priors[k, :]))

		self.assertAlmostEqual(mcgsm.evaluate(input, output), mogsm.evaluate(output), 5)

		mogsm_samples = mogsm.sample(N)
		mcgsm_samples = mcgsm.sample(input)

		# generated samples should have the same distribution in each dimension
		# (ks_2samp returns (statistic, pvalue); the p-value is what should be tested)
		for i in range(mogsm.dim):
			self.assertGreater(
				ks_2samp(mogsm_samples[i], mcgsm_samples[i])[1], 0.0001)

		posterior = mcgsm.posterior(input, mcgsm_samples)

		# average posterior should correspond to prior
		for k in range(mogsm.num_components):
			self.assertLess(abs(1 - mean(posterior[k]) / mogsm.priors[k]), 0.1)
Example #2
    def rand(self):

        m, n = self.__m, self.__n

        s = linalg.cholesky(self.__prod).transpose()
        w = self.__weight

        # Compute the parameters of the posterior distribution.
        mu = linalg.solve(s[:m, :m], s[:m, m:])
        omega = np.dot(s[:m, :m].transpose(), s[:m, :m])
        sigma = np.dot(s[m:, m:].transpose(), s[m:, m:]) / w
        eta = w

        # Simulate the marginal Wishart distribution.
        f = linalg.solve(np.diag(np.sqrt(2.0*random.gamma(
            (eta - np.arange(n))/2.0))) + np.tril(random.randn(n, n), -1),
                         np.sqrt(eta)*linalg.cholesky(sigma).transpose())
        b = np.dot(f.transpose(), f)

        # Simulate the conditional Gaussian distribution.
        a = mu + linalg.solve(linalg.cholesky(omega).transpose(),
                              np.dot(random.randn(m, n),
                                     linalg.cholesky(b).transpose()))

        return a, b
Example #3
def decomposeSubsetKM(K_r, K_rr):
    """decomposes r*m kernel matrix, where r is the number of basis vectors and m the
    number of training examples
    
    @param K_r: r*m kernel matrix, where only the lines corresponding to basis vectors are present
    @type K_r: numpy matrix
    @param basis_vectors: the indices of the basis vectors
    @type basis_vectors: list of integers
    @return svals, evecs, U, C_T_inv
    @rtype tuple of numpy matrices"""
    #K_rr = K_r[:, basis_vectors]
    #C = la.cholesky(K_rr)
    try:
        C = la.cholesky(K_rr)
    except LinAlgError:
        print "Warning: chosen basis vectors not linearly independent"
        print "Shifting the diagonal of kernel matrix"
        #__shiftKmatrix(K_r, basis_vectors)
        #K_rr = K_r[:, basis_vectors]
        #C = la.cholesky(K_rr)
        C = la.cholesky(K_rr + 1e-9 * np.eye(K_rr.shape[0]))
    C_T_inv = la.inv(C.T)
    H = np.dot(K_r.T, C_T_inv)
    svals, evecs, U = decomposeDataMatrix(H.T)
    return svals, evecs, U, C_T_inv
Example #4
def CCA(X, Y, eps=1.e-15):
    """
    Canonical corelation analysis of two matrices
    
    Parameters
    ----------
    X array of shape (nbitem,p) 
    Y array of shape (nbitem,q) 
    eps=1.e-15, float is a small biasing constant
                to grant invertibility of the matrices
    
    Returns
    -------
    ccs, array of shape(min(n,p,q) the canonical correlations
        
    Note
    ----
    It is expected that nbitem>>max(p,q)
    """
    from numpy.linalg import cholesky, inv, svd
    if Y.shape[0] != X.shape[0]:
        raise ValueError("Incompatible dimensions for X and Y")
    p = X.shape[1]
    q = Y.shape[1]
    sqX = np.dot(X.T,X)
    sqY = np.dot(Y.T,Y)
    sqX += np.trace(sqX)*eps*np.eye(p)
    sqY += np.trace(sqY)*eps*np.eye(q)
    rsqX = cholesky(sqX)
    rsqY = cholesky(sqY)
    iX = inv(rsqX).T
    iY = inv(rsqY).T
    Cxy = np.dot(np.dot(X,iX).T,np.dot(Y,iY))
    uv, ccs, vv = svd(Cxy)
    return ccs
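
A minimal usage sketch for the CCA function above (illustrative data; assumes numpy is imported as np): two views sharing one latent signal should yield a large leading canonical correlation.

import numpy as np

rng = np.random.RandomState(0)
z = rng.randn(500, 1)               # shared latent signal
X = z + 0.5 * rng.randn(500, 3)     # view 1: three noisy copies
Y = z + 0.5 * rng.randn(500, 2)     # view 2: two noisy copies
ccs = CCA(X, Y)
print(ccs)  # leading canonical correlation is large, the rest near zero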
Example #5
def compute_trajectory(which_compute = False):
    
    b = 10E-4
    g = 9.8
    t = .1 #this is the delta t
    sx,sy,vx,vy = 0,0,300,600
    Q = .1*np.eye(4)
    R = 500*np.eye(2)
    F = np.array(([1.,0.,t,0.],[0.,1.,0.,t],[0.,0.,1-b,0.],[0.,0.,0.,1-b]))
    u = np.array([0,0,0,-g*t])
    x0 = np.array([sx,sy,vx,vy])
    rand_vec = np.random.rand(4)
    w = np.dot((la.cholesky(Q)).T, rand_vec)
    N = 1200
    x_list = []

    if which_compute:
        y = []
        H = np.eye(4)[:2]
        R = np.eye(2)*500

    for i in range(N):
        if which_compute and 400 <= i + 1 <= 600:
            y.append(np.dot(H,x0) + np.dot(la.cholesky(R),np.random.randn(2)))
        x_new = np.dot(F,x0) + u + np.dot(la.cholesky(Q),np.random.randn(4))
        x0 = x_new
        x_list.append(x_new)


    if which_compute:
        return np.array(x_list), np.array(y) #.shape
    else:
        return np.array(x_list)
Example #6
def gp_pred(logtheta, covfunc, X, y, Xstar, R=None, w=None, Rstar=None):
    
    #else:
    #    print '        xgp_pred()'
        
    # compute training set covariance matrix (K) and
    # (marginal) test predictions (Kss = self-cov; Kstar = cross-cov)
    if R is None:
        K = feval(covfunc, logtheta, X)                     # training covariances
        [Kss, Kstar] = feval(covfunc, logtheta, X, Xstar)   # test covariances (Kss = self covariances, Kstar = cov between train and test cases)
    else:
        K = feval(covfunc, logtheta, X, R, w)               # training covariances
        [Kss, Kstar] = feval(covfunc, logtheta, X, R, w, Xstar, Rstar)   # test covariances
    # K += sn2*eye(X.shape[0])
    try:
        n = X.shape[0]
        K = K + identity(n)*sn2  # jitter for numerical stability
    except TypeError:
        raise Exception(str(K) + " " + str(X.shape) + " " + str(identity(sn2).shape) + " " + str(np.array(K).shape))
    try:
        L = linalg.cholesky(K,lower=True) # lower triangular matrix
    except linalg.LinAlgError:
        L = linalg.cholesky(nearPD(K),lower=True)
    #L = linalg.cholesky(K,lower=True) # lower triangular matrix    
    alpha = solve_chol(L.transpose(),y)         # compute inv(K)*y
      
    out1 = dot(Kstar.transpose(), alpha)        # predicted means
    v = linalg.solve(L, Kstar)
    tmp = v*v
    out2 = Kss - array([tmp.sum(axis=0)]).transpose()  # predicted variances

    return [out1, out2]
Example #7
    def rand(self):

        dim=self.__dim__

        mu=self.__param__.mu
        omega=self.__param__.omega
        sigma=self.__param__.sigma
        eta=self.__param__.eta

        if numpy.isfinite(eta):

            # Simulate the marginal Wishart distribution.
            diag=2.0*random.gamma((eta-numpy.arange(dim))/2.0)
            fact=numpy.diag(numpy.sqrt(diag))+numpy.tril(random.randn(dim,dim),-1)
            fact=linalg.solve(fact,math.sqrt(eta)*linalg.cholesky(sigma).transpose())
            disp=numpy.dot(fact.transpose(),fact)

        else:

            # Account for the special case where the
            # marginal distribution is singular.
            disp=numpy.copy(sigma)

        if numpy.isfinite(omega):

            # Simulate the conditional Gaussian distribution.
            loc=mu+numpy.dot(linalg.cholesky(disp),random.randn(dim))/math.sqrt(omega)

        else:

            # Account for the special case where the
            # conditional distribution is singular.
            loc=numpy.copy(mu)

        return loc,disp
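
The diagonal-chi-square-plus-strictly-lower-normal factor used above is a Bartlett factor: 2*gamma(k/2) draws are chi-square variates with k degrees of freedom. For reference, a self-contained sketch of the standard Bartlett construction for sampling W ~ Wishart(df, scale); names here are illustrative:

import numpy as np

def wishart_bartlett(df, scale, rng=None):
    """Draw W ~ Wishart(df, scale) via the Bartlett decomposition."""
    rng = np.random.default_rng() if rng is None else rng
    d = scale.shape[0]
    L = np.linalg.cholesky(scale)
    A = np.tril(rng.standard_normal((d, d)), -1)       # strictly lower N(0,1)
    A[np.diag_indices(d)] = np.sqrt(rng.chisquare(df - np.arange(d)))
    LA = L @ A
    return LA @ LA.T                                   # E[W] = df * scale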
Example #8
    def test_lapack_endian(self):
        # For bug #1482
        a = array([[5.7998084, -2.1825367], [-2.1825367, 9.85910595]], dtype=">f8")
        b = array(a, dtype="<f8")

        ap = linalg.cholesky(a)
        bp = linalg.cholesky(b)
        assert_array_equal(ap, bp)
Example #9
 def is_positive(matrix):
     """ All main main minors are positive
     """
     try:
         cholesky(matrix)
         return True
     except LinAlgError:
         return False
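
A quick sanity check of the test above (assuming the function is in scope as a plain function, with numpy.linalg's cholesky and LinAlgError imported as it expects):

import numpy as np

A = np.array([[2.0, 1.0], [1.0, 2.0]])   # PD: leading minors 2 and 3
B = np.array([[1.0, 2.0], [2.0, 1.0]])   # indefinite: det = -3
print(is_positive(A))  # True
print(is_positive(B))  # False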
Example #10
File: LASSOModel.py Project: marty10/LASSO
    def factor(self, X, rho):
        n, p = X.shape
        if n >= p:
            L = li.cholesky(np.dot(X.T, X) + rho * np.eye(p))
        else:
            L = li.cholesky(np.eye(n) + 1.0 / rho * np.dot(X, X.T))

        return L, L.T  # L, U
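
The point of caching L and its transpose is that each subsequent ADMM x-update costs only two triangular solves. A minimal sketch of that solve for the skinny case (n >= p), assuming scipy is available and L, U came from the factor method above; in the fat case the factor is of the smaller matrix I + (1/rho)*X*X.T and x is recovered through the matrix inversion lemma.

import numpy as np
from scipy.linalg import solve_triangular

def solve_cached(L, U, b):
    # (X.T X + rho I) x = b with L @ U = X.T X + rho I and U = L.T
    y = solve_triangular(L, b, lower=True)       # forward substitution
    return solve_triangular(U, y, lower=False)   # back substitution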
Example #11
def _is_sympd(M):
    'Check that the matrix M is symmetric positive definite'
    try:
        linalg.cholesky(M)  # check that M is symmetric pd
        is_sympd = True
    except linalg.LinAlgError:
        is_sympd = False
    return is_sympd
Example #12
File: glds.py Project: gabrieag/glds
    def __init__(self,initmean,initvar,transgain,transnoise,measgain,measnoise):

        # Check the initial mean.
        try:
            numstate,=numpy.shape(initmean)
        except ValueError:
            raise Exception('Initial mean must be a vector.')

        # Check the initial variance.
        if numpy.shape(initvar)!=(numstate,numstate):
            raise Exception('Initial variance must be a {}-by-{} matrix.'.format(numstate,numstate))
        if not numpy.allclose(numpy.transpose(initvar),initvar):
            raise Exception('Initial variance matrix must be symmetric.')
        try:
            cholfact=linalg.cholesky(initvar)
        except linalg.LinAlgError:
            raise Exception('Initial variance matrix must be positive-definite.')

        # Check the transition gain.
        if numpy.ndim(transgain)!=2 or numpy.shape(transgain)!=(numstate,numstate):
            raise Exception('Transition gain must be a {}-by-{} matrix.'.format(numstate,numstate))

        # Check the transition noise.
        if numpy.ndim(transnoise)!=2 or numpy.shape(transnoise)!=(numstate,numstate):
            raise Exception('Transition noise must be a {}-by-{} matrix.'.format(numstate,numstate))
        if not numpy.allclose(numpy.transpose(transnoise),transnoise):
            raise Exception('Transition noise matrix must be symmetric.')
        if numpy.any(linalg.eigvalsh(transnoise)<0.0):
            raise Exception('Transition noise matrix must be positive-semi-definite.')

        # Check the measurement gain.
        try:
            numobs,numcol=numpy.shape(measgain)
        except ValueError:
            raise Exception('Measurement gain must be a matrix.')
        if numcol!=numstate:
            raise Exception('Measurement gain matrix must have {} columns.'.format(numstate))

        # Check the measurement noise.
        if numpy.ndim(measnoise)!=2 or numpy.shape(measnoise)!=(numobs,numobs):
            raise Exception('Measurement noise must be a {}-by-{} matrix.'.format(numobs,numobs))
        if not numpy.allclose(numpy.transpose(measnoise),measnoise):
            raise Exception('Measurement noise matrix must be symmetric.')
        try:
            cholfact=linalg.cholesky(measnoise)
        except linalg.LinAlgError:
            raise Exception('Measurement noise matrix must be positive-definite.')

        # Set the model.
        self.initmean=numpy.asarray(initmean)
        self.initvar=numpy.asarray(initvar)
        self.transgain=numpy.asarray(transgain)
        self.transnoise=numpy.asarray(transnoise)
        self.measgain=numpy.asarray(measgain)
        self.measnoise=numpy.asarray(measnoise)

        self.__size__=numstate,numobs
Example #13
def factor(X, rho):
    m, n = X.shape
    if m >= n:
        L = cholesky(X.T.dot(X) + rho*sparse.eye(n))
    else:
        L = cholesky(sparse.eye(m) + 1./rho*(X.dot(X.T)))
    L = sparse.csc_matrix(L)
    U = sparse.csc_matrix(L.T)
    return L, U
Example #14
def induceRankCorr(R, Cstar):
    """Induces rank correlation Cstar onto a sample R [N x k].
    Note that it is easy to specify correlations that are not possible to generate.
    Results generated with a given Cstar should be checked.

    Iman, R. L., and W. J. Conover. 1982. A Distribution-free Approach to Inducing Rank
    Correlation Among Input Variables. Communications in Statistics: Simulation and
    Computations 11:311-334.
    
    Parameters
    ----------
    R : ndarray [N x k]
        Matrix of random samples (with no pre-existing correlation)
    Cstar : ndarray [k x k]
        Desired positive, symmetric correlation matrix with ones along the diagonal.
    
    Returns
    -------
    corrR : ndarray [N x k]
        A correlated matrix of samples."""

    """Define inverse complimentary error function (erfcinv in matlab)
    x is on interval [0,2]
    its also defined in scipy.special"""
    #erfcinv = lambda x: -stats.norm.ppf(x/2)/sqrt(2)

    C = Cstar
    N, k = R.shape
    """Calculate the sample correlation matrix T"""
    T = np.corrcoef(R.T)

    """Calculate lower triangular cholesky
        decomposition of Cstar (i.e. P*P' = C)"""
    P = cholesky(C).T

    """Calculate lower triangular cholesky decomposition of T, i.e. Q*Q' = T"""
    Q = cholesky(T).T

    """S*T*S' = C"""
    S = P.dot(inv(Q))

    """Replace values in samples with corresponding
    rank-indices and convert to van der Waerden scores"""

    RvdW = -np.sqrt(2) * special.erfcinv(2*((_columnRanks(R)+1)/(N+1)))

    """Matrix RBstar has a correlation matrix exactly equal to C"""
    RBstar = RvdW.dot(S.T)
    
    """Match up the rank pairing in R according to RBstar"""
    ranks = _columnRanks(RBstar)
    sortedR = np.sort(R, axis=0)
    corrR = np.zeros(R.shape)
    for j in np.arange(k):
        corrR[:, j] = sortedR[ranks[:, j], j]

    return corrR
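
A minimal usage sketch (illustrative target matrix; assumes the function's own imports, including the project's _columnRanks helper, are in scope): starting from independent samples, the induced Spearman rank correlation should approximate Cstar while each column's marginal values are preserved.

import numpy as np
from scipy import stats

rng = np.random.RandomState(1)
R = rng.rand(1000, 3)                       # independent, uncorrelated samples
Cstar = np.array([[1.0, 0.6, 0.3],
                  [0.6, 1.0, 0.0],
                  [0.3, 0.0, 1.0]])         # positive definite target
corrR = induceRankCorr(R, Cstar)
print(np.round(stats.spearmanr(corrR)[0], 2))  # close to Cstar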
Example #15
File: common.py Project: tatsy/pyspsolve
def factor(A, rho):
    m, n = A.shape
    if m >= n:  # if skinny
        AA = np.dot(A.T, A) + rho * np.identity(n)
        L = npl.cholesky(AA)
    else:  # if fat
        AA = np.identity(m) + 1.0 / rho * np.dot(A, A.T)
        L = npl.cholesky(AA)
    U = L.T
    return L, U
Example #16
File: lasso.py Project: johmathe/research
def factor(A, rho):
    m, n = A.shape
    At = np.transpose(A)

    if m >= n:    # if skinny
        L = npl.cholesky(np.dot(At, A) + rho * np.eye(n))
    else:         # if fat
        L = npl.cholesky(np.eye(m) + 1.0 / rho * np.dot(A, At))

    U = np.transpose(L)

    return (L, U)
Example #17
def factor(A, rho):
    """ Returns the factorisation of rho*I + At*A"""
    m, n = A.shape
    At = np.transpose(A)

    if m >= n:    # if skinny
        L = npl.cholesky(np.dot(At, A) + rho * np.eye(n))
    else:         # if fat
        L = npl.cholesky(np.eye(m) + (1 / float(rho)) * np.dot(A, At))

    U = np.transpose(L)

    return (L, U)
Example #18
 def _is_pos_def(self, A):
     """
     Check if matrix A is positive definite. Code from
     https://stackoverflow.com/questions/16266720/find-out-if-matrix-is-positive-definite-with-numpy
     """
     if allclose(A, A.T, rtol=1e-10, atol=1e-12):
         try:
             cholesky(A)
             return True
         except LinAlgError:
             return False
     else:
         return False
Example #19
    def compute_cholesky(self, x, train=False):
        """
        Computes the Cholesky decomposition.

        """
        Kxx = self.kernel(x, x)
        # import pylab as pl
        # pl.imshow(Kxx)
        # pl.show()
        if not train:
            self.cholesky = cholesky(Kxx)
        else:
            self.train_cholesky = cholesky(Kxx)
Example #20
def decomposeSubsetKM(K_r, bvectors):
    K_rr = getBVKM(K_r, bvectors)
    try:
        C = cholesky(K_rr)
    except LinAlgError:
        print "Warning: chosen basis vectors not linearly independent"
        print "Shifting the diagonal of kernel matrix"
        shiftKmatrix(K_r, bvectors)
        K_rr = getBVKM(K_r, bvectors)
        C = cholesky(K_rr)
    C_T_inv = inv(C.T)
    H = (K_r).T*C_T_inv
    svals, evecs, U = Decompositions.decomposeDataMatrix(H.T)
    #Z = C_T_inv*U
    return svals, evecs, U, C_T_inv
Example #21
File: dlm.py Project: bcrestel/timeseries
def KalmanFilter(dataset, m0, C0, Ft, Gt, V, W):
    """ Compute Kalman filter on dataset 
    starting with initial distribution N(m0, C0).
    Inputs:
        dataset = np.array containing data -- (time steps) x (observations)
        m0 = mean of initial state
        C0 = covariance matrix of initial state
        Ft, Gt = list of matrices defining the DLM
        V, W = covariance matrices for observation and model
    Outputs:
        m_all = means of state estimate (theta_t | y_{1:t})
        C_all = covariance matrices of state estimate
        a_all = means for state predictive (theta_t | y_{1:t-1})
        R_all = covariance matrices for state predictive """
    timesteps = len(dataset)
    nbobs = dataset.size // timesteps  # observations per time step
    param = m0.size
    m = m0.reshape((param, 1))
    C = C0
    m_all = np.zeros((timesteps, param))
    a_all = np.zeros((timesteps, param))
    C_all, R_all = [], []
    ii = 0
    for YT, F, G in zip(dataset, Ft, Gt):
        Y = YT.reshape((nbobs,1))
        # State predictive
        a = G.dot(m)
        Csq = cholesky(C)
        GCsq = G.dot(Csq)
        R = W + GCsq.dot(GCsq.T)
        a_all[ii,:] = a.T
        R_all.append(R)
        # Intermediate step
        e = Y - F.dot(a)
        Rsq = cholesky(R)
        FRsq = F.dot(Rsq)
        Q = V + FRsq.dot(FRsq.T)
        Qinv = np.linalg.inv(Q)
        Qinvsq = cholesky(Qinv)
        # State estimate
        RFt = R.dot(F.T)
        RFtQsq = RFt.dot(Qinvsq)
        m = a + RFt.dot(Qinv.dot(e))
        C = R - RFtQsq.dot(RFtQsq.T)
        m_all[ii,:] = m.T
        C_all.append(C)
        ii += 1
    return m_all, C_all, a_all, R_all
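
A minimal usage sketch for a one-dimensional local-level model (a random walk observed in noise), with illustrative values; the constant matrices are simply repeated in the Ft and Gt lists:

import numpy as np

T = 100
F = np.array([[1.0]])   # observation operator
G = np.array([[1.0]])   # state transition
V = np.array([[0.5]])   # observation noise covariance
W = np.array([[0.1]])   # model noise covariance
rng = np.random.RandomState(0)
truth = np.cumsum(np.sqrt(W[0, 0]) * rng.randn(T))
dataset = (truth + np.sqrt(V[0, 0]) * rng.randn(T)).reshape(T, 1)
m_all, C_all, a_all, R_all = KalmanFilter(dataset, np.zeros(1), np.eye(1),
                                          [F] * T, [G] * T, V, W)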
Example #22
File: drift.py Project: ainafp/pyhrf
def sampleDrift(varInvSigma_drift, ptLambdaY, dim):

    mean_drift = np.linalg.solve(varInvSigma_drift, ptLambdaY)
    choleskyInvSigma_drift = cholesky(varInvSigma_drift).transpose()
    drift = np.linalg.solve(choleskyInvSigma_drift, random.randn(dim))
    drift += mean_drift
    return drift
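
The trick here is sampling from N(mu, Sigma) given only the precision matrix invSigma: with invSigma = U^T U (U being the upper-triangular transpose of the lower Cholesky factor), solving U x = z for z ~ N(0, I) gives cov(x) = U^-1 U^-T = inv(invSigma) = Sigma. A self-contained check of that identity:

import numpy as np

rng = np.random.RandomState(0)
prec = np.array([[4.0, 1.0], [1.0, 3.0]])   # precision (inverse covariance)
U = np.linalg.cholesky(prec).T              # upper factor: U.T @ U = prec
x = np.linalg.solve(U, rng.randn(2, 100000))
print(np.round(np.cov(x), 3))               # approx inv(prec)
print(np.round(np.linalg.inv(prec), 3))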
Example #23
 def compute_constants(self, y):
     """
     Precomputes constants of the log density of the proposal distribution,
     which is Gaussian as p(x|y) ~ N(mu, R)
     where
      mu = y - a
      a = 0
      R  = gamma^2 I + M M^T
      M  = 2 [\nabla_x k(x, z_i)]|_{x=y}
      
      Returns (mu, L_R), where L_R is the lower Cholesky factor of R
     """
     assert(len(shape(y))==1)
     
      # M = 2 [\nabla_x k(x, z_i)]|_{x=y}
     if self.Z is None:
         R = self.gamma ** 2 * eye(len(y))
     else:
         M = 2 * self.kernel.gradient(y, self.Z)
         # R = gamma^2 I + \nu^2 * M H M^T
         H = Kernel.centring_matrix(len(self.Z))
         R = self.gamma ** 2 * eye(len(y)) + self.nu2 * M.T.dot(H.dot(M))
         
     L_R = cholesky(R)
     
     return y.copy(), L_R
Example #24
  def __call__(self, function, point, state):
    """
    Computes Goldfeld step 
    """
    g = function.gradient(point)
    state['gradient'] = g
    G = function.hessian(point)
    state['hessian'] = G
    c = 1e-8 # is this one best?
    
    d0 = None

    try:
        L = cholesky(G)
        # reach here => isPositiveDefinite = True
        step = n_solve(L.T, n_solve(L, -g))
    except:
        # isPositiveDefinite = False
        G_eigvals = eigvalsh(G)
        minEig = min(G_eigvals)
        if minEig < 0:
            shift = -minEig + c
            
            # avoiding sparse case with big nVars
            for i in range(G.shape[0]):
                G[i, i] += shift
                
        step = n_solve(G, -g)

    state['direction'] = step
    return step
Example #25
    def __call__(self, function, point, state):
        """
    Computes Goldstein-Price step 
    """
        g = function.gradient(point)
        state["gradient"] = g
        G = function.hessian(point)
        state["hessian"] = G

        isPositiveDefinite = True
        d0 = None

        try:
            L = cholesky(G)
            d0 = n_solve(L.T, n_solve(L, -g))
        except:
            isPositiveDefinite = False

        if isPositiveDefinite:
            cosTheta = dot(d0, -g) / (norm(d0) * norm(g))
            if cosTheta >= self.nu:
                step = d0
            else:
                step = -g
        else:
            step = -g

        state["direction"] = step
        return step
Example #26
    def sigmaPoints(mean, covariance):
        """
        Generate sigma points around the given mean values based on covariance.

        Implements the symmetric sigma point set of Julier and Uhlmann 2004,
        'Unscented Filtering and Nonlinear Estimation', equation 12. Weighting
        applied to first sigma point is 1/3 based on the assumption of Gaussian
        distributions.

        @return: List of sigma point column vectors, list of weights.
        """
        N = len(mean)
        mean = np.reshape(mean, (N,1))
        assert covariance.shape == (N,N)

        sigmaPoints = [mean] * (2*N + 1)
        w0 = 1.0 / 3  # based on assumption of Gaussian distributions.

        cholesky = linalg.cholesky((N/(1-w0)) * covariance)
        # cholesky returns A s.t. A*A.T = P so we use the columns of A
        columns = np.hsplit(cholesky, N)
        for i, column in enumerate(columns):
            sigmaPoints[i+1] = mean + column
            sigmaPoints[i+1+N] = mean - column
        weights = [w0] + [(1-w0)/(2*N)] * (2 * N)
        return sigmaPoints, weights
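
A quick check of the set above (assuming the method is callable as a plain function, it takes no self, and numpy.linalg is imported as linalg as it expects): the weighted mean and covariance of the sigma points reproduce the inputs exactly.

import numpy as np

mean = np.array([1.0, -2.0])
cov = np.array([[2.0, 0.3], [0.3, 1.0]])
points, weights = sigmaPoints(mean, cov)
m = sum(w * p for w, p in zip(weights, points))
P = sum(w * (p - m) @ (p - m).T for w, p in zip(weights, points))
print(np.round(m.ravel(), 6))  # [ 1. -2.]
print(np.round(P, 6))          # equals cov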
Example #27
    def _normsq(self, X, axis=-1):
        """
        Compute the (periodic, i.e. on a torus) squared distance needed for
        FFT smoothing. Assumes coordinate system is linear.

        Parameters
        ----------
        X : array
           array of points
        axis : int, optional
           axis containing coordinates. Default -1
        """
        # copy X
        _X = np.array(X)
        # roll coordinate axis to front
        _X = np.rollaxis(_X, axis)
        # convert coordinates to FWHM units
        if self.fwhm != 1.0:
            f = fwhm2sigma(self.fwhm)
            if f.shape == ():
                f = np.ones(len(self.bshape)) * f
            for i in range(len(self.bshape)):
                _X[i] /= f[i]
        # whiten?
        if self.cov is not None:
            _chol = npl.cholesky(self.cov)
            _X = np.dot(npl.inv(_chol), _X)
        # compute squared distance
        D2 = np.sum(_X**2, axis=0)
        return D2
Example #28
 def _update(self):
     """
     Calculate those terms for prediction that do not depend on predictive
     inputs.
     """
     from numpy.linalg import cholesky, solve, LinAlgError
     from numpy import transpose, eye, matrix, zeros
     import types
     self._K = self.calc_covariance(self.X)
     if not self._K.shape[0]:  # we didn't have any data
         self._L = matrix(zeros((0, 0), numpy.float64))
         self._alpha = matrix(zeros((0, 1), numpy.float64))
         self.LL = 0.
     else:
         try:
             self._L = matrix(cholesky(self._K))
         except LinAlgError as detail:
             raise RuntimeError("""Cholesky decomposition of covariance """
                                """matrix failed. Your kernel may not be positive """
                                """definite. Scipy complained: %s""" % detail)
         self._alpha = solve(self._L.T, solve(self._L, self.y))
         self.LL = (
             - self.n * math.log(2.0 * math.pi)
             - (self.y.T * self._alpha)[0, 0]
         ) / 2.0
     # print self.LL
     # import IPython; IPython.Debugger.Pdb().set_trace()
     self.LL -= log(diagonal(self._L)).sum()
Example #29
def generate_rho_matrix(distribution, params, num_otus, iters):
    """Make a rho matrix according to the given distribution and parameters.
    For physically meaningful correlations you must create a positive-definite
    rho matrix; if it is not positive definite it implies a correlation that
    can't exist. To test whether a matrix is PD, the only measure I have found
    is the Cholesky decomposition. It is O(n**3), but it appears to be the only
    necessary and sufficient test for positive definiteness.

    A positive definite matrix can be created with matrix multiplication A*A.T
    but this will alter the main diagonal away from 1 (called unit diagonal in 
    the literature). 

    If a uniform distribution is passed, you can guarantee that the random 
    rho matrix will be PD if the the bounds of the distribution are
    +- 1/(num_otus-1) according to:
    http://stats.stackexchange.com/questions/13368/off-diagonal-range-for-guaranteed-positive-definiteness
    
    The code will attempt to draw from the distribution you specified `iters`
    number of times to create a positive definite matrix. If all trials
    fail it will raise an error.
    """
    # draw from the distribution, reshape to be num_otusXnum_otus array
    for i in range(iters):
        rho = distribution.rvs(*params,
            size=num_otus**2).reshape(num_otus,num_otus)
        sym_rho = make_symmetric(rho, trace_1=True)
        try:
            _ = cholesky(sym_rho)
            print('%s iters were required to draw pos-def rho matrix.' % (i+1))
            return sym_rho
        except LinAlgError:
            pass
    raise ValueError('A symmetric, positive definite matrix could not '+\
        'be computed with the given inputs and random draw.')
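
A sketch of the guaranteed-PD case mentioned in the docstring, assuming scipy.stats: off-diagonal entries drawn uniformly from +-1/(num_otus-1) make the symmetrised unit-diagonal matrix (almost surely strictly) diagonally dominant, so the Cholesky test passes on the first draw.

import numpy as np
from numpy.linalg import cholesky
from scipy import stats

k = 10
bound = 1.0 / (k - 1)
draw = stats.uniform(loc=-bound, scale=2 * bound).rvs(size=(k, k))
rho = (draw + draw.T) / 2.0      # symmetrise; entries stay within the bound
np.fill_diagonal(rho, 1.0)       # unit diagonal
cholesky(rho)                    # succeeds: diagonally dominant => PD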
Example #30
def generate(mu, Sigma, sampleNo):

    R = cholesky(Sigma)
    # numpy's cholesky returns a lower factor R with R @ R.T = Sigma,
    # so row samples must be multiplied by R.T
    arr = np.dot(np.random.randn(sampleNo, 2), R.T) + mu

    return arr
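
A quick check that the samples match the requested moments (illustrative values; assumes the function's imports, np and cholesky, are in scope):

import numpy as np

mu = np.array([1.0, -1.0])
Sigma = np.array([[2.0, 0.8], [0.8, 1.0]])
arr = generate(mu, Sigma, 200000)
print(np.round(arr.mean(axis=0), 2))  # approx mu
print(np.round(np.cov(arr.T), 2))     # approx Sigma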
Example #31
def genEllipticalData(d, n, mu, cov):
    X = genSphericalData(d, n, np.zeros((d, )), 1)
    L = nplin.cholesky(cov)
    # multiply rows by L.T so the sample covariance is L @ L.T = cov
    X = np.matmul(X, L.T) + mu
    return X
Example #32
def get_filling(seq, labels):
    counts = dict(Counter(labels))
    if len(counts) < len(seq):
        for i, _ in enumerate(seq):
            counts.setdefault(i, 0)
    best_value = -len(labels)
    best_filling = dict()
    for p_seq in permutations(seq):
        filling = dict()
        for s, item in zip(p_seq, counts.items()):
            label, count = item
            filling[label] = count - s
        val = sum([v for v in filling.values() if v < 0])
        if best_value < val:
            best_value = val
            best_filling = filling
    return best_filling


if __name__ == "__main__":
    W = test_graph()
    seq = [3, 2, 2]
    k = len(seq)
    relax = solve_sdp_program(W, k)
    L = cholesky(relax)
    res = find_partition(L, W, k)
    labels = balance(L, seq, res.get('simplex'), res.get('labels'),
                     res.get('random_vectors'))
    s = get_sum_of_weights(labels, W)
    print(s)
Example #33
def heuristicMomentMatching(tgtMoments, tgtCorrMtx, n_scenario):
    '''
    given target 4 moments (mean, stdev, skewness, kurtosis)
    and correlation matrix
    @param tgtMoments, numpy.array, size: n_rv * 4
    @param tgtCorrMtx, numpy.array, size: n_rv * n_rv
    @param n_scenario, positive integer
    '''
    EPS = 1e-3
    MaxErrMoment = 1e-3
    MaxErrCorr = 1e-3
    MaxCubIter = 2
    MaxIter = 20
    MaxStartTrial = 20

    assert tgtMoments.shape[1] == 4
    assert tgtMoments.shape[0] == tgtCorrMtx.shape[0] == tgtCorrMtx.shape[1]

    n_rv = tgtMoments.shape[0]
    outMtx = np.empty((n_rv, n_scenario))

    #target origin moments, size: (n_rv * 4)
    MOM = np.zeros((n_rv, 4))
    MOM[:, 1] = 1
    MOM[:, 2] = tgtMoments[:, 2] / (tgtMoments[:, 1]**3)  # skew/(std**3)
    MOM[:, 3] = tgtMoments[:, 3] / (tgtMoments[:, 1]**4)  # kurt/(std**4)

    # draw samples whose moments match the target moments
    # cubic transform, find good start points
    for rv in range(n_rv):
        cubErr, bestErr = float('inf'), float('inf')

        for _ in range(MaxStartTrial):
            tmpOut = np.random.rand(n_scenario)

            for _ in range(MaxCubIter):
                EY = MOM[rv, :]
                EX = np.fromiter(((tmpOut**(order + 1)).mean()
                                  for order in range(12)), float)

                sol = spopt.root(cubicTransform, (0, 1, 0, 0),
                                 args=(EY, EX),
                                 method="broyden1")
                cubParams = sol.x
                root = cubicTransform(cubParams, EY, EX)
                cubErr = np.sum(np.abs(root))

                if cubErr < EPS:
                    print("early stop")
                    break
                else:
                    #update random sample(a+bx+cx^2+dx^3)
                    tmpOut = (cubParams[0] + cubParams[1] * tmpOut +
                              cubParams[2] * (tmpOut**2) + cubParams[3] *
                              (tmpOut**3))

            if cubErr < bestErr:
                bestErr = cubErr
                outMtx[rv, :] = tmpOut

    #computing starting properties and error
    outMoments = np.empty((n_rv, 4))
    outMoments[:, 0] = outMtx.mean(axis=1)
    outMoments[:, 1] = outMtx.std(axis=1)
    outMoments[:, 2] = spstats.skew(outMtx, axis=1)
    outMoments[:, 3] = spstats.kurtosis(outMtx, axis=1)
    outCorrMtx = np.corrcoef(outMtx)

    errMoment = RMSE(outMoments, tgtMoments)
    errCorr = RMSE(outCorrMtx, tgtCorrMtx)
    print('start errMoments:%s, errCorr:%s' % (errMoment, errCorr))

    #Cholesky decomposition
    L = la.cholesky(tgtCorrMtx)

    #main iteration of the algorithm
    for _ in range(MaxIter):
        Lp = la.cholesky(outCorrMtx)
        LpInv = la.inv(Lp)
        transMtx = L.dot(LpInv)
        tmpOutMtx = transMtx.dot(outMtx)

        #update statistics
        outMoments[:, 0] = tmpOutMtx.mean(axis=1)
        outMoments[:, 1] = tmpOutMtx.std(axis=1)
        outMoments[:, 2] = spstats.skew(tmpOutMtx, axis=1)
        outMoments[:, 3] = spstats.kurtosis(tmpOutMtx, axis=1)
        outCorrMtx = np.corrcoef(tmpOutMtx)

        errMoment = RMSE(outMoments, tgtMoments)
        errCorr = RMSE(outCorrMtx, tgtCorrMtx)

        #cubic transform
        for rv in range(n_rv):
            tmpOut = tmpOutMtx[rv, :]
            for _ in range(MaxCubIter):
                EY = MOM[rv, :]
                EX = np.fromiter(((tmpOut**(order + 1)).mean()
                                  for order in range(12)), float)

                sol = spopt.root(cubicTransform,
                                 np.random.rand(4),
                                 args=(EY, EX))

                cubParams = sol.x

                #update tmpOut y=a+bx+cx^2+dx^3
                outMtx[rv, :] = (cubParams[0] + cubParams[1] * tmpOut +
                                 cubParams[2] * (tmpOut**2) + cubParams[3] *
                                 (tmpOut**3))

                cubErr = RMSE(tmpOut, outMtx[rv, :])
                if cubErr < EPS:
                    break
                else:
                    #update random sample(a+bx+cx^2+dx^3)
                    tmpOut = outMtx[rv, :]

        #update statistics
        outMoments[:, 0] = outMtx.mean(axis=1)
        outMoments[:, 1] = outMtx.std(axis=1)
        outMoments[:, 2] = spstats.skew(outMtx, axis=1)
        outMoments[:, 3] = spstats.kurtosis(outMtx, axis=1)
        outCorrMtx = np.corrcoef(outMtx)

        errMoment = RMSE(outMoments, tgtMoments)
        errCorr = RMSE(outCorrMtx, tgtCorrMtx)

        if errMoment <= MaxErrMoment and errCorr <= MaxErrCorr:
            break

    #rescale samples
    outMtx = (tgtMoments[:, 0][:, np.newaxis] +
              tgtMoments[:, 1][:, np.newaxis] * outMtx)

    return outMtx
Example #34
def heuristic_moment_matching(tgt_moments, tgt_corrs,
                              n_scenario=200, bias=False,
                              max_moment_err=1e-3, max_corr_err=1e-3,
                              max_cubic_err=1e-5, verbose=False):
    """
    Parameters:
    --------------
    tgt_moments:, numpy.array,shape: (n_rv * 4), 1~4 central moments
    tgt_corrs:, numpy.array, size: shape: (n_rv * n_rv), correlation matrix
    n_scenario:, positive integer, number of scenario to generate
    bias: boolean,
        - True means biased estimators,
        - False means unbiased estimators
    max_moment_err: float, maximum acceptable error between tgt_moments and
        sample moments
    max_corr_err: float, maximum acceptable error between tgt_corrs and
        the sample correlation matrix

    Returns:
    -------------
    out_mtx: numpy.array, shape:(n_rv, n_scenario)
    """

    # check variable
    assert n_scenario > 0
    assert tgt_moments.shape[0] == tgt_corrs.shape[0] == tgt_corrs.shape[1]
    t0 = time()

    # parameters
    n_rv = tgt_moments.shape[0]

    # iteration for find good start samples
    max_start_iter = 5

    # cubic transform iteration
    max_cubic_iter = 2

    # main iteration of moment matching loop
    max_main_iter = 20

    # out mtx, for storing scenarios
    out_mtx = np.empty((n_rv, n_scenario))

    # ***********************************************************
    # scenario moments Y shape: (n_rv, 4)
    # After generating scenario S, we can transform S = aS+b
    # where the 1~4 moments of S are the same as Y
    # ***********************************************************
    y_moments = np.zeros((n_rv, 4))

    if bias:
        y_moments[:, 1] = 1
        y_moments[:, 2] = tgt_moments[:, 2]
        y_moments[:, 3] = tgt_moments[:, 3] + 3
    else:
        ns = float(n_scenario)
        ns_m1 = ns - 1
        ns_m1_2 = ns_m1 * ns_m1
        ns_m2 = ns - 2
        ns_m3 = ns - 3
        ns2 = ns * ns

        y_moments[:, 1] = ns_m1 / ns
        y_moments[:, 2] = (tgt_moments[:, 2] * ns_m1 * ns_m2 / ns2)
        y_moments[:, 3] = ((tgt_moments[:, 3] + 3 * ns_m1_2 / ns_m2 /
                            ns_m3) * ns_m2 * ns_m3 * ns_m1_2 / (ns2 - 1) / ns2)

    # find good start moment matrix (with err_moment converge)
    for rv in range(n_rv):
        cubic_err, best_cub_err = float('inf'), float('inf')

        # loop until errMom converge
        for _ in range(max_start_iter):
            # each random variable consists of n_scenario random sample
            tmp_out = np.random.rand(n_scenario)

            # 1~4th moments of the random variable, shape (4, )
            ey = y_moments[rv, :]

            # loop until cubic transform converge
            for cub_iter in range(max_cubic_iter):

                # 1~12th moments of the random samples
                ex = np.array([(tmp_out ** (idx + 1)).mean()
                               for idx in range(12)])

                # find corresponding cubic parameters
                x_init = np.array([0., 1., 0., 0.])
                out = spopt.leastsq(cubic_function, x_init, args=(ex, ey),
                                    full_output=True, ftol=1E-12,
                                    xtol=1E-12)
                cubic_params = out[0]
                cubic_err = np.sum(out[2]['fvec'] ** 2)

                # update random samples
                tmp_out = (cubic_params[0] +
                           cubic_params[1] * tmp_out +
                           cubic_params[2] * (tmp_out ** 2) +
                           cubic_params[3] * (tmp_out ** 3))

                if cubic_err < max_cubic_err:
                    # find good samples
                    break
                else:
                    if verbose:
                        print ("rv:{}, cubiter:{}, cubErr: {}, "
                               "not converge".format(rv, cub_iter, cubic_err))

            # accept current samples
            if cubic_err < best_cub_err:
                best_cub_err = cubic_err
                out_mtx[rv, :] = tmp_out

    # computing starting properties and error
    # correct moment, wrong correlation

    moments_err, corrs_err = error_statistics(out_mtx, y_moments,
                                              tgt_corrs)
    if verbose:
        print ('start mtx (orig) y_moment_err:{}, corr_err:{}'.format(
            moments_err, corrs_err))

    # Cholesky decomposition of target corr mtx
    c_lower = la.cholesky(tgt_corrs)

    # main iteration, break when converge
    for main_iter in range(max_main_iter):
        if moments_err < max_moment_err and corrs_err < max_corr_err:
            break

        # transform matrix
        out_corrs = np.corrcoef(out_mtx)
        co_inv = la.inv(la.cholesky(out_corrs))
        l_vec = np.dot(c_lower, co_inv)
        out_mtx = np.dot(l_vec, out_mtx)

        # wrong moment, but correct correlation
        moments_err, corrs_err = error_statistics(out_mtx, y_moments,
                                                  tgt_corrs)
        if verbose:
            print ('main_iter:{} cholesky transform (orig) y_moment_err:{}, '
                   'corr_err:{}'.format(main_iter, moments_err, corrs_err))

        # after the Cholesky decomposition the corr_err converges,
        # but the moment error may grow, hence the
        # cubic transform is applied again
        for rv in range(n_rv):
            cubic_err = float('inf')
            tmp_out = out_mtx[rv, :]
            ey = y_moments[rv, :]

            # loop until cubic transform error converge
            for cub_iter in range(max_cubic_iter):
                ex = np.array([(tmp_out ** (idx + 1)).mean()
                               for idx in range(12)])
                X_init = np.array([0., 1., 0., 0.])
                out = spopt.leastsq(cubic_function, X_init, args=(ex, ey),
                                    full_output=True, ftol=1E-12, xtol=1E-12)
                cubic_params = out[0]
                cubic_err = np.sum(out[2]['fvec'] ** 2)

                tmp_out = (cubic_params[0] +
                           cubic_params[1] * tmp_out +
                           cubic_params[2] * (tmp_out ** 2) +
                           cubic_params[3] * (tmp_out ** 3))

                if cubic_err < max_cubic_err:
                    out_mtx[rv, :] = tmp_out
                    break
                else:
                    if verbose:
                        print ("main_iter:{}, rv: {}, "
                               "(orig) cub_iter:{}, "
                               "cubErr: {}, not converge".format(
                            main_iter, rv, cub_iter, cubic_err))

        moments_err, corrs_err = error_statistics(out_mtx, y_moments,
                                                  tgt_corrs)
        if verbose:
            print ('main_iter:{} cubic_transform, (orig) y_moment error:{}, '
                   'corr err: {}'.format(main_iter, moments_err, corrs_err))

    # end of main iteration, post-processing

    # rescale scenario to original moments, out_mtx shape:(n_rv, n_scenario)
    out_mtx = (out_mtx * tgt_moments[:, 1][:, np.newaxis] +
               tgt_moments[:, 0][:, np.newaxis])

    out_central_moments = np.empty((n_rv, 4))
    out_central_moments[:, 0] = out_mtx.mean(axis=1)
    if bias:
        out_central_moments[:, 1] = out_mtx.std(axis=1)
        out_central_moments[:, 2] = spstats.skew(out_mtx, axis=1)
        out_central_moments[:, 3] = spstats.kurtosis(out_mtx, axis=1)
    else:
        # unbiased estimator
        out_central_moments[:, 1] = out_mtx.std(axis=1, ddof=1)
        out_central_moments[:, 2] = spstats.skew(out_mtx, axis=1, bias=False)
        out_central_moments[:, 3] = spstats.kurtosis(out_mtx, axis=1,
                                                     bias=False)
    out_corrs = np.corrcoef(out_mtx)

    if verbose:
        print ("1st tgt moments difference {}".format(
            (tgt_moments[:, 0] - out_central_moments[:, 0]).sum()))
        print ("2nd tgt moments difference {}".format(
            (tgt_moments[:, 1] - out_central_moments[:, 1]).sum()))
        print ("3th tgt moments difference {}".format(
            (tgt_moments[:, 2] - out_central_moments[:, 2]).sum()))
        print ("4th tgt moments difference {}".format(
            (tgt_moments[:, 3] - out_central_moments[:, 3]).sum()))
        print ("tgt corr difference {}".format(
            (tgt_corrs - np.corrcoef(out_mtx)).sum()))

    moments_err = rmse(out_central_moments, tgt_moments)
    corrs_err = rmse(out_corrs, tgt_corrs)

    if verbose:
        print ('sample central moment err:{}, corr err:{}'.format(
            moments_err, corrs_err))
        print ("HeuristicMomentMatching elapsed {:.3f} secs".format(
            time() - t0))

    if moments_err > max_moment_err or corrs_err > max_corr_err:
        raise ValueError("out mtx not converge, moment error: {}, "
                         "corr err:{}".format(moments_err, corrs_err))

    return out_mtx
Example #35
import numpy as np
import pandas as pd
from numpy.linalg import cholesky
import matplotlib.pyplot as plt

num = 50
col = []
for i in np.arange(num):
    col.append('X' + str(i + 1))

# bivariate normal
miu2 = np.array([2, 7])
sigma2 = np.array([[1, 1.5], [1.5, 3]])
r2 = cholesky(sigma2)
# multiply by the transposed lower factor so the covariance is r2 @ r2.T = sigma2
s2 = np.dot(np.random.randn(num, 2), r2.T) + miu2
plt.plot(s2[:, 0], s2[:, 1], '+')
plt.show()
s2 = pd.DataFrame(s2.T, columns=col)
s2.T.to_csv('C:/Users/DELL/desktop/多维正态/二维正态.csv')

# trivariate normal
miu3 = np.array([2, 5, 8])
sigma3 = np.array([[1, 1.5, 0.9], [1.5, 3, 1.7], [0.9, 1.7, 5]])
r3 = cholesky(sigma3)
s3 = np.dot(np.random.randn(num, 3), r3.T) + miu3
s3 = pd.DataFrame(s3.T, columns=col)
s3.T.to_csv('C:/Users/DELL/desktop/多维正态/三维正态.csv')

# 4-dimensional normal
miu4 = np.array([2, 5, 8, -5])
sigma4 = np.array([[1, 1.5, 0.9, 2.1], [1.5, 3, 1.7, 1.8], [0.9, 1.7, 5, 3],
Example #36
    def run(self, HData,
                  pc_input=None, save_pc=None, gn_input=None, save_gn=None, save_directory_GNOp=None,
                  show=1):
        """
        Return harmonised parameters and diagnostic data for input harmonisation match-up data

        :type HData: harm_data_writer.HarmInputData
        :param HData: Input match-up data object



        :return:
            :a: *numpy.ndarray*

            Harmonised parameters
        """

        Transform2NormIndOp = Transform2NormInd()

        ################################################################################################################
        # 1.	Compute Approximate Solution to find Pre-conditioner to Full Problem
        ################################################################################################################

        return_covariance = True
        if save_directory_GNOp is not None:
            return_covariance = False

        if pc_input is None:
            if show != 0:
                print "- Determine approximate solution to find pre-conditioner to full problem..."

            t1 = time()
            # a. sample data for preconditioning
            Sample2IndOp = Sample2Ind()
            HData_sample = Sample2IndOp.run(HData, sf=0.1, show=(show==2))

            HData_sample = Transform2NormIndOp.run(HData_sample)

            # b. determine preconditioner solution
            print "Beginning Solver..."
            PCOp = HarmonisationEIVPC()
            preconditioner = PCOp.run(HData_sample)

            del HData_sample
            t2 = time()
            print "t_PC:", str(t2-t1)

        else:
            preconditioner = HarmonisationResult(pc_input)

        HData.a = preconditioner.parameter  # set PC output parameters as current parameter estimates

        if save_pc:
            preconditioner.save(save_pc, save_residuals=False)

        ################################################################################################################
        # 2.	Compute Full Solution using EIV Gauss-Newton Algorithm
        ################################################################################################################

        if show != 0:
            print "Computing full solution..."

        if gn_input is None:
            if show != 0:
                print " - Transforming to Independent Variables..."
            # a. reparameterise input data such that output data are independent quantities
            HData = Transform2NormIndOp.run(HData)

            # b. run GN algorithm on modified data
            GNOp = GNAlgo(HData, preconditioner.parameter_covariance_matrix)
        else:
            if show != 0:
                print " - Opening Transformed Independent Variables..."
            GNOp = GNAlgo(HData)
            GNOp.open(gn_input)
            if show != 0:
                print " - Applying approximate solution to pre-conditioner to full problem..."
            GNOp.S = cholesky(preconditioner.parameter_covariance_matrix)

        HarmonisationOutput = HarmonisationResult()
        HarmonisationOutput.parameter_sensors = HData.idx["parameter_sensor"]
        HarmonisationOutput.idx = deepcopy(HData._original_idx)

        HarmonisationOutput.parameter, HarmonisationOutput.parameter_covariance_matrix, \
            HarmonisationOutput.cost, HarmonisationOutput.cost_dof, \
            HarmonisationOutput.cost_p_value, HarmonisationOutput.values_res, \
            HarmonisationOutput.ks_res, systematic_errors, systematic_error_sensors \
            = GNOp.run(show=(show == 2), return_covariance=return_covariance)

        if systematic_errors is not None:
            HarmonisationOutput.additional_variables['systematic_errors'] = {'data': systematic_errors,
                                                                             'dim': 's',
                                                                             'dtype': 'f4',
                                                                             'Description': 'Fitted systematic errors'}

            HarmonisationOutput.additional_variables['systematic_error_sensors'] = {'data': systematic_error_sensors,
                                                                                    'dim': 's',
                                                                                    'dtype': 'S1',
                                                                                    'Description': 'Sensors for fitted '
                                                                                                   'systematic errors'}

        if save_directory_GNOp is not None:
            GNOp.save(save_directory_GNOp)

        return HarmonisationOutput
Example #37
    def _d_nll_d_theta(self, x, t, theta):

        vt = np.exp(theta[1])
        n, d = np.shape(x)
        m = self.m
        theta_gc = theta[0:2 + d]
        x_m = np.reshape(theta[2 + d:], (self.m, d))

        K_NM = self.cov.cov_matrix_ij(x, x_m, theta_gc)

        K_M = self.cov.cov_matrix_ij(x_m, x_m, theta_gc)

        #L_M = cholesky(K_M+1e-5*np.eye(m))
        #L_Minv_K_NM = solve(L_M,K_NM.T)
        #Q_N =  dot(L_Minv_K_NM.T,  L_Minv_K_NM) #Q_N = dot(K_NM,dot(inv(K_M),K_NM.T))

        #K_N = self.cov.cov_matrix_ij(x,x,theta_gc)
        L_M = cholesky(K_M + np.eye(m) * 1e-5)

        #Inversion done right
        #TODO: cho_solve?
        L_M_inv = solve_triangular(L_M, np.eye(m), lower=True)
        K_M_inv = dot(L_M_inv.T, L_M_inv)
        #LI = np.diag(np.diag(K_N - Q_N)+vt*np.ones(n))

        n_theta = len(theta)
        gradient = []
        Kinv = self.inv_cov_matrix(x, theta)  #TODO: N^2 M

        dot_K_NM_K_M_inv = dot(K_NM, K_M_inv)
        dot_K_M_inv_K_NM_T = dot_K_NM_K_M_inv.T
        dot_Kinv_t = dot(Kinv, t)

        Cov_xm_xm = self.cov.cov_matrix_ij(x_m, x_m, theta_gc)
        Cov_x_xm = self.cov.cov_matrix_ij(x, x_m, theta_gc)
        Cov_x_x = self.cov.cov_matrix_ij(x, x, theta_gc)

        for j in range(0, n_theta):
            if j < 2 + d:
                if j == 1:
                    dKdj = vt * np.eye(n)
                else:
                    K_NM_d = self.cov._d_cov_matrix_d_theta_ij(x,
                                                               x_m,
                                                               theta_gc,
                                                               j,
                                                               Cov=Cov_x_xm)
                    K_M_d = self.cov._d_cov_matrix_d_theta_ij(x_m,
                                                              x_m,
                                                              theta_gc,
                                                              j,
                                                              Cov=Cov_xm_xm)
                    K_N_d = self.cov._d_cov_matrix_d_theta_ij(x,
                                                              x,
                                                              theta_gc,
                                                              j,
                                                              Cov=Cov_x_x)
                    #Derivation by the hyperparameters:

                    #print K_M_inv -inv(K_M)#
                    #print "difference: ", np.sum(np.abs(K_M_inv -inv(K_M)))

                    #dKdj = Q_N_dt + LI_dt
            else:
                i = (j - (2 + d)) // d
                dim = (j - (2 + d)) % d
                K_NM_d = self.cov._d_cov_matrix_d_xi_ij(x_m,
                                                        x,
                                                        theta_gc,
                                                        i,
                                                        dim,
                                                        Cov=Cov_x_xm.T).T  #)
                K_M_d = self.cov._d_cov_matrix_d_x(
                    x_m, theta_gc, i, dim, Cov=Cov_xm_xm).T  #,Cov=Cov_xm_xm).T
                K_N_d = np.zeros((n, n))
                #Q_N_dt = 2*dot(K_NM_d[i],dot_K_M_inv_K_NM_T) - dot(dot_K_NM_K_M_inv,dot( K_M_d,dot_K_M_inv_K_NM_T))

                #basically the same as above:
                #LI_dt = -np.diag(np.diag(Q_N_dt))		#K_N_d == Zeros

            if j != 1:
                Q_N_dt = 2 * dot(K_NM_d, dot_K_M_inv_K_NM_T) - dot(
                    dot_K_NM_K_M_inv, dot(K_M_d,
                                          dot_K_M_inv_K_NM_T))  #TODO: N^2 M
                LI_dt = np.diag(np.diag(K_N_d - Q_N_dt))
                dKdj = Q_N_dt + LI_dt

            #dKdj = self.d_cov_matrix_d_theta(x,theta,j)
            gradient.append(
                0.5 * tracedot(Kinv, dKdj) -
                0.5 * dot(dot_Kinv_t.T, dot(dKdj, dot_Kinv_t)))  #TODO: N^2 M

        return np.array(gradient)
Example #38
def is_pos_def(A):
    try:
        la.cholesky(A)
        return True
    except np.linalg.LinAlgError:
        return False
Example #39
def cholesky_decomposition(a):
    return linalg.cholesky(a)
Example #40
 def __init__(self, mu, sigma):
     self.mu, self.sigma = np.asarray(mu), np.asarray(sigma)
     # Precompute sigma contributions
     r = cholesky(sigma)
     self._rinv = inv(r)
     self._c = 0.5 * len(mu) * log(2 * pi) + np.sum(np.log(diag(r)))  # 0.5*log|sigma| via the Cholesky diagonal
Example #41
def sim_y(X, beta, Sigma):
  n = X.shape[0]
  chol = cholesky(Sigma)
  Z = norm.rvs(size=(n, 1))
  # chol.T @ Z yields covariance Sigma when cholesky returns an upper factor
  # (scipy.linalg.cholesky default); with numpy's lower factor use chol @ Z
  y = X @ beta + chol.T @ Z
  return y
Example #42
# This program prints the lower-triangular matrix in the Cholesky decomposition of
# a Hilbert matrix of any given order.

# Code by: Arash Ashrafzadeh
# Homepage: https://arashmath.github.io/

# Importing necessary libraries
from numpy.linalg import cholesky  # To perform matrix decomposition
from scipy.linalg import hilbert  # To create the Hilbert matrix

# Inputting the order of the Hilbert matrix
n = int(input(" Please enter the order of Hilbert matrix: "))

# Printing the lower-triangular matrix in Cholesky decomposition for Hilbert matrix
print(cholesky(hilbert(n)))

# Lines 12 & 15 can be merged into one line as the following (but it violates the clean, readable coding
# # paradigms I guess! So it's not recommended).
# print(cholesky(hilbert(int(input()))))
Example #43
    test_dims = np.arange(10, 110, 10)
    n_samples = 150

    for dim in test_dims:
        mu_base = np.array([[-1, 1], [1, -1], [3, -1]])
        Sigma1_base = np.array([[1, -0.7], [-0.7, 1]])
        Sigma2_base = np.array([[1, 0.7], [0.7, 1]])
        Sigma3_base = np.array([[1, 0.9], [0.9, 1]])

        mu = np.c_[mu_base, np.zeros((3, dim - 2))]
        Sigma1 = fill_cov(Sigma1_base, dim)
        Sigma2 = fill_cov(Sigma2_base, dim)
        Sigma3 = fill_cov(Sigma3_base, dim)

        Sigma = np.stack((Sigma1, Sigma2, Sigma3), axis=0)
        R = cholesky(Sigma)
        samples = np.ones((n_samples, 1, 1)) * mu[None, ...]

        noise = randn(n_samples, dim)
        noise = np.einsum("kjm,nj->nkm", R, noise)

        samples = samples + noise
        samples = samples.reshape(-1, dim)

        ml, map = attempt_em_fit(samples, 3, pi, Sigma)
        hist_ml.append(1 - ml)
        hist_map.append(1 - map)

    fig, ax = plt.subplots()
    ax.plot(test_dims, hist_ml, c="tab:red", marker="o", label="MLE")
    ax.plot(test_dims,
Example #44
         str(coVar)[0] + str(moMatch)[0] + str(antiPaths)[0] + '_' +
         str(PY1 * 100) + '_' + str(PY2 * 100))
 seed(SEED)  # RNG seed value
 for i in range(R):  # Simulation Runs
     print "\nSimulation Run %d of %d" % (i + 1, R)
     print "----------------------------------------------------"
     print "Elapsed Time in Minutes %8.2f" % ((time() - t0) / 60)
     print "----------------------------------------------------"
     for panel in range(4):  # Panels
         # Correlation Matrix
         v0, kappa_v, sigma_v, rho = para[panel, :]
         CovMat = zeros((3, 3), 'd')
         CovMat[0, :] = [1.0, rho, 0.0]
         CovMat[1, :] = [rho, 1.0, 0.0]
         CovMat[2, :] = [0.0, 0.0, 1.0]
         CM = cholesky(CovMat)
         print "\n\n Results for Panel %d\n" % (panel + 1)
         print " v0=%3.2f, sigma_v=%3.2f, kappa_v=%3.2f, rho=%3.2f" \
             %( v0 , sigma_v , kappa_v , rho )
         print " ----------------------------------------------------"
         z = 0
         for T in tL:  # Times-to-Maturity
             B0T = B([kappa_r, theta_r, sigma_r, r0, T])
             # Discount Factor B0(T)
             r, v, S, h, V = 0.0, 0.0, 0.0, 0.0, 0.0
             # Memory Clean-up
             dt = T / M
             # Time Interval in Years
             rand = RNG(M, I)
             # Random Numbers
             r = eulerMRProc(r0, sigma_r, kappa_r, theta_r, 0, CM)
Example #45
# -*- coding: utf-8 -*-
"""
Created on Sun Oct 22 13:03:23 2017

@author: dell
"""

import numpy as np
from numpy.linalg import cholesky
import math

# set the sigma of the Gaussian distribution
sigma = np.array([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]])
#sigma = np.array([[1,0.25,0.25,0.25],[0.25,1,0.25,0.25],[0.25,0.25,1,0.25],[0.25,0.25,0.25,1]])
R = cholesky(sigma)

# set the mu of the Gaussian distribution
mu_1 = np.array([[1, 1, 1, 1]])
mu_2 = np.array([[2, 2, 2, 2]])

# generate 4-D Gaussian data for two classes
s_1 = np.dot(np.random.randn(50, 4), R.T) + mu_1
s_2 = np.dot(np.random.randn(50, 4), R.T) + mu_2
l_1 = np.zeros((1, 50))
l_2 = np.ones((1, 50))

# concatenate the arrays to form the training data
list_1 = []
for data in s_1:
    list_temp = list(data)
    list_1.append(list_temp)
Example #46
from scipy.optimize import check_grad
from scipy.sparse import coo_matrix

import operalib as ovk
from numpy import eye, dot, sort, array, arange, pi, cos, sin, newaxis, ones
from numpy.testing import assert_allclose
from numpy.linalg import cholesky, norm
from numpy.random import randn, rand, seed, randint

seed(0)
X = sort(200 * rand(1000, 1) - 100, axis=0)
y = array([pi * sin(X).ravel(), pi * cos(X).ravel()]).T
Tr = 2 * rand(2, 2) - 1
Tr = dot(Tr, Tr.T)
Tr = Tr / norm(Tr, 2)
U = cholesky(Tr)
y = dot(y, U)

Sigma = 2 * rand(2, 2) - 1
Sigma = dot(Sigma, Sigma.T)
Sigma = 1. * Sigma / norm(Sigma, 2)
Cov = cholesky(Sigma)
y += dot(randn(y.shape[0], y.shape[1]), Cov.T)

X_test = arange(-100.0, 100.0, .5)[:, newaxis]
y_t = dot(array([pi * sin(X_test).ravel(), pi * cos(X_test).ravel()]).T, U)


def test_ridge_grad_id():
    """Test ovk.OVKRidgeRisk gradient with finite differences."""
    K = ovk.DecomposableKernel(A=eye(2))
Example #47
    def sample_arch(self, node_id, store_normal_arch, store_reduce_arch):
        num_ops = len(genotypes.PRIMITIVES)
        num_nodes = self.model._steps

        def limite_range(arch, num_ops, num_nodes):
            for i in range(num_nodes):
                arch[4 * i, ] = np.max((np.min((arch[4 * i, ], (i + 1))), 0))
                arch[4 * i + 1, ] = np.max((np.min(
                    (arch[4 * i + 1, ], num_ops - 1)), 0))
                arch[4 * i + 2, ] = np.max((np.min(
                    (arch[4 * i + 2, ], (i + 1))), 0))
                arch[4 * i + 3, ] = np.max((np.min(
                    (arch[4 * i + 3, ], num_ops - 1)), 0))
            return arch

        def get_performance(self, selec_arch):
            n_nodes = genotypes.STEPS
            normal = []
            reduction = []
            performance = np.zeros((1))
            # selec_arch=np.zeros((2*n_nodes,))
            for i in range(n_nodes):
                normal.extend([(selec_arch[4 * i], selec_arch[4 * i + 1]),
                               (selec_arch[4 * i + 2], selec_arch[4 * i + 3])])
                reduction.extend([(selec_arch[4 * i + 4 * n_nodes],
                                   selec_arch[4 * i + 1 + 4 * n_nodes]),
                                  (selec_arch[4 * i + 2 + 4 * n_nodes],
                                   selec_arch[4 * i + 3 + 4 * n_nodes])])

            arch = (normal, reduction)
            performance[0, ] = self.evaluate(arch)
            return performance[0, ]

        if node_id > 999:
            alfa = 0.01
            n = 10
            sigma = 1

            mu = np.zeros((1, 4 * self.model._steps))
            Sigma = np.eye(4 * self.model._steps)
            R = cholesky(Sigma)

            yita = np.dot(np.random.randn(n, 4 * self.model._steps), R) + mu
            n_yita = np.empty((n, 4 * self.model._steps))
            n_yita1 = np.empty((n, 4 * num_nodes))

            index0 = np.random.randint(1000)
            test_normal_arch = store_normal_arch[index0, ]
            test_reduce_arch = store_reduce_arch[index0, ]

            for i in range(n):
                n_f = self.novelty_fitness(
                    np.int_(np.round((test_normal_arch + yita[i, ]))),
                    np.int_(np.round(store_normal_arch)), 10)
                n_yita[i, ] = n_f * yita[i, ]
                select_i = limite_range((test_normal_arch + yita[i, ]),
                                        num_ops, num_nodes)
                test_arch1 = np.hstack((select_i, test_reduce_arch))
            # Optionally the reward could be taken into consideration as well:
            # gf = get_performance(self, test_arch1)
            # n_yita1[i, ] = gf * yita[i, ]
            # selec_normal = test_normal_arch + alfa * (1 / (n * sigma)) * (0.5 * sum(n_yita) + 0.5 * sum(n_yita1))
            selec_normal = test_normal_arch + alfa * (
                1 / (n * sigma)) * sum(n_yita)

            store_normal_arch[index0, ] = selec_normal
            selec_normal = np.int_(np.round(selec_normal))
            selec_normal = limite_range(selec_normal, num_ops, num_nodes)

            yita = np.dot(np.random.randn(n, 4 * self.model._steps), R) + mu
            n_yita = np.empty((n, 4 * self.model._steps))

            index1 = np.random.randint(1000)
            # note: still samples with index0 here, although the update below
            # stores the result at index1
            test_normal_arch = store_normal_arch[index0, ]
            test_reduce_arch = store_reduce_arch[index0, ]

            for i in range(n):
                n_f = self.novelty_fitness(
                    np.int_(np.round((test_reduce_arch + yita[i, ]))),
                    np.int_(np.round(store_reduce_arch)), 10)
                n_yita[i, ] = n_f * yita[i, ]
                select_i = limite_range((test_reduce_arch + yita[i, ]),
                                        num_ops, num_nodes)
                test_arch2 = np.hstack((test_normal_arch, select_i))
            # Optionally the reward could be taken into consideration as well:
            # n_yita1[i, ] = get_performance(self, test_arch2) * yita[i, ]
            # selec_reduce = test_reduce_arch + alfa * (1 / (n * sigma)) * (0.5 * sum(n_yita) + 0.5 * sum(n_yita1))
            selec_reduce = test_reduce_arch + alfa * (
                1 / (n * sigma)) * sum(n_yita)
            store_reduce_arch[index1, ] = selec_reduce
            selec_reduce = np.int_(np.round(selec_reduce))
            selec_reduce = limite_range(selec_reduce, num_ops, num_nodes)

            normal = []
            reduction = []
            for i in range(self.model._steps):
                s1 = int(selec_normal[4 * i, ])
                s2 = int(selec_normal[4 * i + 1, ])
                s3 = int(selec_normal[4 * i + 2, ])
                s4 = int(selec_normal[4 * i + 3, ])
                s5 = int(selec_reduce[4 * i, ])
                s6 = int(selec_reduce[4 * i + 1, ])
                s7 = int(selec_reduce[4 * i + 2, ])
                s8 = int(selec_reduce[4 * i + 3, ])
                normal.extend([(s1, s2), (s3, s4)])
                reduction.extend([(s5, s6), (s7, s8)])
            index = (index0, index1)

        else:
            num_ops = len(genotypes.PRIMITIVES)
            n_nodes = self.model._steps

            normal = []
            reduction = []
            for i in range(n_nodes):
                ops = np.random.choice(range(num_ops), 4)
                nodes_in_normal = np.random.choice(range(i + 2),
                                                   2,
                                                   replace=False)
                nodes_in_reduce = np.random.choice(range(i + 2),
                                                   2,
                                                   replace=False)
                normal.extend([(nodes_in_normal[0], ops[0]),
                               (nodes_in_normal[1], ops[1])])

                reduction.extend([(nodes_in_reduce[0], ops[2]),
                                  (nodes_in_reduce[1], ops[3])])

            normal = np.int_(normal)
            reduction = np.int_(reduction)
            index = (node_id, node_id)


        # the operations from the two previous nodes are different
        return index, (normal, reduction)
def gaussSample(mu, sigma, n):
    # draw n samples from N(mu, sigma); A Z has covariance A A^T = sigma
    A = cholesky(sigma)
    Z = np.random.normal(loc=0, scale=1, size=(len(mu), n))
    return np.dot(A, Z).T + mu
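A hypothetical usage check for gaussSample (assuming numpy is imported as np and cholesky comes from numpy.linalg, as elsewhere on this page); since A Z has covariance A A^T = sigma, the empirical moments should match the inputs:

mu = np.array([1.0, -2.0])
sigma = np.array([[2.0, 0.3],
                  [0.3, 1.0]])
X = gaussSample(mu, sigma, 100000)
print(X.mean(axis=0))           # close to mu
print(np.cov(X, rowvar=False))  # close to sigma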
Example #49
File: ecos_.py Project: stonebig/qpsolvers
def convert_to_socp(P, q, G, h):
    """
    Convert the Quadratic Program defined by:

    .. math::

        \\begin{split}\\begin{array}{ll}
        \\mbox{minimize} & \\frac{1}{2} x^T P x + q^T x \\\\
        \\mbox{subject to} & G x \\leq h
        \\end{array}\\end{split}

    to an equivalent Second-Order Cone Program:

    .. math::

        \\begin{split}\\begin{array}{ll}
        \\mbox{minimize} & c^T_s y \\\\
        \\mbox{subject to} & G_s y \\leq_{\\cal K} h_s
        \\end{array}\\end{split}

    This function is adapted from ``ecosqp.m`` in the `ecos-matlab
    <https://github.com/embotech/ecos-matlab/>`_ repository. See the documentation in
    that script for details on this reformulation.

    Parameters
    ----------
    P : numpy.array
        Primal quadratic cost matrix.
    q : numpy.array
        Primal quadratic cost vector.
    G : numpy.array
        Linear inequality constraint matrix.
    h : numpy.array
        Linear inequality constraint vector.

    Returns
    -------
    c_socp : array
        SOCP cost vector.
    G_socp : array
        SOCP inequality matrix.
    h_socp : array
        SOCP inequality vector.
    dims : dict
        Dimension dictionary used by ECOS.
    """
    n = P.shape[1]  # dimension of QP variable
    c_socp = hstack([zeros(n), 1])  # new SOCP variable stacked as [x, t]
    L = cholesky(P)  # lower-triangular L with L L^T = P; requires P to be positive definite

    scale = 1.0 / sqrt(2)
    G_quad = vstack(
        [
            scale * hstack([q, -1.0]),
            hstack([-L.T, zeros((L.shape[0], 1))]),
            scale * hstack([-q, +1.0]),
        ]
    )
    h_quad = hstack([scale, zeros(L.shape[0]), scale])

    dims: Dict[str, Any] = {"q": [L.shape[0] + 2]}
    if G is None:
        G_socp = G_quad
        h_socp = h_quad
        dims["l"] = 0
    else:
        G_socp = vstack([hstack([G, zeros((G.shape[0], 1))]), G_quad])
        h_socp = hstack([h, h_quad])
        dims["l"] = G.shape[0]

    G_socp = csc_matrix(G_socp)
    return c_socp, G_socp, h_socp, dims
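A hedged usage sketch for convert_to_socp, assuming the module-level imports the function relies on (numpy's hstack/vstack/zeros/sqrt and scipy.sparse.csc_matrix) plus the ecos solver package; the tiny QP below is illustrative, not from the original file:

import ecos
import numpy as np

P = np.array([[4.0, 1.0], [1.0, 2.0]])  # must be positive definite for cholesky(P)
q = np.array([1.0, 1.0])
G = -np.eye(2)                           # encodes x >= 0 as -x <= 0
h = np.zeros(2)

c_socp, G_socp, h_socp, dims = convert_to_socp(P, q, G, h)
sol = ecos.solve(c_socp, G_socp, h_socp, dims)
x_opt = sol["x"][:P.shape[1]]            # drop the epigraph variable t
print(x_opt)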
Example #50
def gauss_ell(mu,
              va,
              dim=misc.DEF_VIS_DIM,
              npoints=misc.DEF_ELL_NP,
              level=misc.DEF_LEVEL):
    """Given a mean and covariance for multi-variate
    gaussian, returns the coordinates of the confidense ellipsoid.
    
    Compute npoints coordinates for the ellipse of confidence of given level
    (all points will be inside the ellipsoides with a probability equal to
    level).
    
    :Parameters:
        mu : ndarray
            mean of the pdf
        va : ndarray
            variance of the pdf
        dim : sequence
            sequences of two integers which represent the dimensions where to
            project the ellipsoid.
        npoints: int
            number of points to generate for the ellipse.
        level : float
            level of confidence (between 0 and 1).

    :Returns:
        Returns the coordinate x and y of the ellipse."""
    if level >= 1 or level <= 0:
        raise ValueError("level should be a scalar strictly between 0 and 1")

    mu = N.atleast_1d(mu)
    va = N.atleast_1d(va)
    d = N.shape(mu)[0]
    c = N.array(dim)

    if N.any(c < 0) or N.any(c >= d):
        raise ValueError("dim elements should be >= 0 and < %d (dimension"
                         " of the variance)" % d)
    if N.size(mu) == N.size(va):
        mode = "diag"
    else:
        if N.ndim(va) == 2:
            if N.shape(va)[0] == N.shape(va)[1]:
                mode = "full"
            else:
                raise DenError("variance not square")
        else:
            raise DenError("mean and variance are not dim conformant")

    # When X is a sample from a multivariate N(mu, Sigma), (X-mu)' Sigma^-1 (X-mu)
    # follows a Chi2(d) law. Here we only take 2 dimensions, so Chi2 with 2
    # degrees of freedom (see Wasserman; this is easy to see with characteristic
    # functions).
    chi22d = chi2(2)
    mahal = N.sqrt(chi22d.ppf(level))

    # Generates a circle of npoints
    theta = N.linspace(0, 2 * N.pi, npoints)
    circle = mahal * N.array([N.cos(theta), N.sin(theta)])

    # Get the dimension which we are interested in:
    mu = mu[c]
    if mode == "diag":
        va = va[c]
        elps = N.outer(mu, N.ones(npoints))
        elps += N.dot(N.diag(N.sqrt(va)), circle)
    elif mode == "full":
        va = va[c, :][:, c]
        # Method: compute the cholesky decomp of each cov matrix, that is
        # compute cova such as va = cova * cova'
        # WARN: scipy is different than matlab here, as scipy computes a lower
        # triangular cholesky decomp:
        #   - va = cova * cova' (scipy)
        #   - va = cova' * cova (matlab)
        # So take care when comparing results with matlab !
        cova = lin.cholesky(va)
        elps = N.outer(mu, N.ones(npoints))
        elps += N.dot(cova, circle)
    else:
        raise ValueError("var mode not recognized")

    return elps[0, :], elps[1, :]
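A hypothetical plotting example for gauss_ell (matplotlib assumed available; not part of the original module):

import matplotlib.pyplot as plt

ex, ey = gauss_ell(N.array([0.0, 1.0]),
                   N.array([[2.0, 0.8], [0.8, 1.0]]),
                   dim=[0, 1], npoints=100, level=0.95)
plt.plot(ex, ey)
plt.axis('equal')
plt.show()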
    def continue_cholesky(self,
                          x,
                          x_old,
                          chol_dict_old,
                          apply_pivot=True,
                          observed=True,
                          nugget=None,
                          regularize=True,
                          assume_full_rank=False,
                          rank_limit=0):
        """

        U = C.continue_cholesky(x, x_old, chol_dict_old[, observed=True, nugget=None,
                rank_limit=0])


        Returns {'pivots': piv, 'U': U}


        Computes incomplete Cholesky factorization of self(z,z). Here z is the
        concatenation of x and x_old. Assumes the Cholesky factorization of
        self(x_old, x_old) has already been computed.


        :Arguments:

            -   `x`: The input array on which to evaluate the Cholesky factorization.

            -   `x_old`: The input array on which the Cholesky factorization has been
                computed.

            -   `chol_dict_old`: A dictionary with keys ['pivots', 'U']. Would be the
                output of either this method or C.cholesky().

            -   `apply_pivot`: A flag. If it's set to 'True', it returns a
                matrix U (not necessarily triangular) such that U.T*U=C(x,x).
                If it's set to 'False', the return value is a dictionary.
                Item 'pivots' is a vector of pivots, and item 'U' is an
                upper-triangular matrix (not necessarily square) such that
                U[:,argsort(piv)].T * U[:,argsort(piv)] = C(x,x).

            -   `observed`: If 'True', any observations are taken into account
                when computing the Cholesky factor. If not, the unobserved
                version of self is used.

            -   `nugget`: The 'nugget' parameter, which will essentially be
                added to the diagonal of C(x,x) before Cholesky factorizing.
                
            -   `rank_limit`: If rank_limit > 0, the factor will have at most 
                rank_limit rows.
        """
        if regularize:
            x = regularize_array(x)

        if rank_limit > 0:
            raise ValueError, 'NearlyFullRankCovariance does not accept a rank_limit argument. Use Covariance instead.'

        # Concatenation of the old points and new points.
        xtot = vstack((x_old, x))

        # Extract information from chol_dict_old.
        U_old = chol_dict_old['U']
        m_old = U_old.shape[0]
        piv_old = chol_dict_old['pivots']

        # Number of old points.
        N_old = x_old.shape[0]

        # Number of new points.
        N_new = x.shape[0]

        # Compute off-diagonal part of Cholesky factor
        offdiag = self.__call__(x=x_old[piv_old[:m_old], :],
                                y=x,
                                observed=observed,
                                regularize=False)
        trisolve(U_old[:, :m_old], offdiag, uplo='U', transa='T', inplace=True)

        # Compute new diagonal part of Cholesky factor
        C_new = self.__call__(x=x, y=x, observed=observed, regularize=False)
        if nugget is not None:
            for i in xrange(N_new):
                C_new[i, i] += nugget[i]
        C_new -= offdiag.T * offdiag
        if not assume_full_rank:
            U_new, m_new, piv_new = ichol_full(c=C_new,
                                               reltol=self.relative_precision)
        else:
            U_new = cholesky(C_new).T
            m_new = U_new.shape[0]
            piv_new = arange(m_new)
        U_new = asmatrix(U_new[:m_new, :])
        U = asmatrix(
            zeros((m_new + m_old, N_old + N_new), dtype=float, order='F'))

        # Top portion of U
        U[:m_old, :m_old] = U_old[:, :m_old]
        U[:m_old, N_new + m_old:] = U_old[:, m_old:]
        offdiag = offdiag[:, piv_new]
        U[:m_old, m_old:N_new + m_old] = offdiag

        # Lower portion of U
        U[m_old:, m_old:m_old + N_new] = U_new
        if m_old < N_old and m_new > 0:
            offdiag_lower = self.__call__(x=x[piv_new[:m_new], :],
                                          y=x_old[piv_old[m_old:], :],
                                          observed=observed,
                                          regularize=False)
            offdiag_lower -= offdiag[:, :m_new].T * U[:m_old, m_old + N_new:]
            trisolve(U_new[:, :m_new],
                     offdiag_lower,
                     uplo='U',
                     transa='T',
                     inplace=True)
            U[m_old:, m_old + N_new:] = offdiag_lower

        # Rank and pivots
        m = m_old + m_new
        piv = hstack((piv_old[:m_old], piv_new + N_old, piv_old[m_old:]))

        # Arrange output matrix and return.
        if m < 0:
            raise ValueError, 'Matrix does not appear positive semidefinite.'

        if not apply_pivot:
            # Useful for self.observe. U is upper triangular.
            if assume_full_rank:
                return {'pivots': piv, 'U': U, 'C_eval': C_new, 'U_new': U_new}
            else:
                return {'pivots': piv, 'U': U}

        else:
            # Useful for the user. U.T * U = C(x,x).
            return U[:, argsort(piv)]
Example #52
import numpy as np
import numpy.linalg as nl
from numpy import zeros_like, sqrt, shape, array

# The snippet is truncated above; A is assumed here to be a real square matrix,
# symmetrized so that the (possibly complex) factorization below can be attempted.
A = np.random.randn(4, 4)
A = A + A.T

def Cholesky(A):
    A = A.astype('complex')
    L = zeros_like(A)
    L[0][0] = sqrt(A[0][0])
    for i in range(shape(A)[0]):
        for j in range(shape(A)[0]):
            if i == j:
                s = 0
                for k in range(i):
                    s += L[i, k] * L[i, k]
                L[i, i] = sqrt(A[i, j] - s)
            if i > j:
                s = 0
                for k in range(i):
                    s += L[i, k] * L[j, k]
                L[i, j] = 1. / L[j, j] * (A[i, j] - s)
    return L  #,L.T, dot(L,L.T)


print(A)
# print(Cholesky(A))  # or: print(Cholesky(A).astype(float))
B = array([[6, 3, 4, 8], [3, 6, 5, 1], [4, 5, 10, 7], [8, 1, 7, 25]])
print(Cholesky(B))
print(nl.cholesky(B))
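A quick check, not in the original, that the hand-rolled factorization reconstructs B and agrees with numpy's lower-triangular factor:

L = Cholesky(B)
print(np.allclose(np.dot(L, L.T), B))       # L L^T reconstructs B
print(np.allclose(L.real, nl.cholesky(B)))  # matches numpy's lower factor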
Example #53
    def generate_general_tmg(self,
                             Fc,
                             gc,
                             M,
                             mean_r,
                             initial,
                             samples=1,
                             cov=True):
        """
        Generates samples of truncated Gaussian distributed random vectors with general covariance matrix under
        constraint

        Fc * x + g >= 0.

        Random vector length will be equal to the mean vector length, specified as a parameter.

        Example usage - generation of non-negative truncated normal random vectors of size 5, with identity
        covariance matrix:
            >> import numpy as np
            >> size = 5
            >> mean = [0.1] * size
            >> cov_mtx = np.identity(size)
            >> Fc = np.identity(size)
            >> g = np.zeros((size,1))
            >> initial = np.ones((size,1))
            >> print(HMCTruncGaussian().generate_general_tmg(Fc, g, cov_mtx, mean, initial))
            [[1.5393077420852723, 0.83193549862758009, 0.17057082476061466, 0.35605405861148831, 0.54828265215645966]]

        :param Fc: constraint matrix
        :param gc: constraint vector
        :param M: covariance (or precision) matrix of the distribution
        :param mean_r: mean vector of the distribution (note: this is the mean after truncation of a normal distribution)
        :param initial: initial/starting point
        :param samples: number of samples to output (default=1).
        :param cov: if True, M is a covariance matrix; if False, it is a precision matrix
        :return: list of samples
        """
        # sanity check
        s = gc.shape[0]
        if Fc.shape[0] != s:
            print("Error: constraint dimensions do not match")
            return

        try:
            R = cholesky(M)
        except lin.LinAlgError:
            print(
                "Error: covariance or precision matrix is not positive definite"
            )
            return

        # using covariance matrix
        if cov:
            mu = np.matrix(mean_r)
            if mu.shape[1] != 1:
                mu = mu.transpose()

            g = np.matrix(gc) + np.matrix(Fc) * mu
            F = np.matrix(Fc) * R.transpose()
            initial_sample = lin.solve(R.transpose(), initial - mu)
        # using precision matrix
        else:
            r = np.matrix(mean_r)
            if r.shape[1] != 1:
                r = r.transpose()

            mu = lin.solve(R, lin.solve(R.transpose(), r))
            g = np.matrix(gc) + np.matrix(Fc) * mu
            F = lin.solve(R, np.matrix(Fc))
            initial_sample = initial - mu
            initial_sample = R * initial_sample

        # dimension of mean vector; each sample must be of this dimension
        dim = len(mu)

        # define all vectors in column order; may change to list for output
        sample_matrix = []

        # more for debugging purposes
        if (F * initial_sample + g < 0).any():
            print("Error: inconsistent initial condition")
            return

        # count total number of times boundary has been touched
        bounce_count = 0

        # squared Euclidean norm of constraint matrix columns
        Fsq = np.sum(np.square(F), axis=0)
        Ft = F.transpose()
        # generate samples
        for i in range(samples):
            print("General HMC")
            stop = False
            j = -1
            # use gauss because it's faster
            initial_velocity = np.matrix([gauss(0, 1)
                                          for _ in range(dim)]).transpose()
            # print(initial_velocity)
            # initial_velocity = np.matrix('1.4090; 1.4172; 0.6715; -1.2075')
            # initial_velocity = np.matrix('-0.46510217; -0.34660608; -1.17232004; -1.89907886')
            # initial_velocity = np.matrix('0.38491682; 1.27530709; 0.7218227; -0.00850574; 0.22724687')
            previous = initial_sample.__copy__()

            x = previous.__copy__()
            T = np.pi / 2
            tt = 0

            while True:
                a = np.real(initial_velocity.__copy__())
                b = x.__copy__()

                fa = F * a
                fb = F * b

                u = np.sqrt(np.square(fa) + np.square(fb))
                # has to be arctan2 not arctan
                phi = np.arctan2(-fa, fb)

                # print(a)
                # find the locations where the constraints were hit
                pn = np.abs(np.divide(g, u))
                t1 = sys.maxsize * np.ones((dim, 1))

                collision = False
                inds = [-1] * dim
                for k in range(dim):
                    if pn[k] <= 1:
                        collision = True
                        pn[k] = 1
                        # compute the time the coordinates hit the constraint wall
                        t1[k] = -1 * phi[k] + np.arccos(
                            np.divide(-1 * g[k], u[k]))
                        inds[k] = k
                    else:
                        pn[k] = 0

                if collision:
                    # if there was a previous reflection (j > -1)
                    # and there is a potential reflection at the sample plane
                    # make sure that a new reflection at j is not found because of numerical error
                    if j > -1:
                        if pn[j] == 1:
                            cum_sum_pn = np.cumsum(pn).tolist()
                            temp = cum_sum_pn[0]

                            index_j = int(temp[j]) - 1
                            tt1 = t1[index_j]

                            if np.abs(tt1) < EPS or np.abs(tt1 -
                                                           2 * np.pi) < EPS:
                                t1[index_j] = sys.maxsize

                    mt = np.min(t1)

                    # update j
                    j = inds[int(np.argmin(t1))]
                else:
                    mt = T

                # update travel time
                tt += mt

                if tt >= T:
                    mt -= tt - T
                    stop = True

                # print(a)
                # update position and velocity
                x = a * np.sin(mt) + b * np.cos(mt)
                v = a * np.cos(mt) - b * np.sin(mt)

                if stop:
                    break

                # update new velocity
                reflected = F[j, :] * v / Fsq[0, j]
                initial_velocity = v - 2 * reflected[0, 0] * Ft[:, j]

                bounce_count += 1

            # need to transform back to unwhitened frame
            if cov:
                sample = R.transpose() * x + mu
            else:
                sample = lin.solve(R, x) + mu

            sample = sample.transpose().tolist()
            sample_matrix.append(sample[0])

        return sample_matrix
Example #54
    def init(self, parameters, **kwargs):
        '''Parses parameters from user input and allocates parameter array

        '''
        if self.name is None:
            raise MatmodlabError('material did not define name attribute')

        logging.getLogger('matmodlab.mmd.simulator').info(
            'setting up {0} material'.format(self.name))

        # --- parse the input parameters
        if kwargs.get('param_names') is not None:
            param_names = kwargs['param_names']
        else:
            constants = len(parameters)
            param_names = self.param_names(constants)

        param_names = [s.upper() for s in param_names]
        if not isinstance(parameters, (dict, )):
            if len(parameters) != len(param_names):
                raise MatmodlabError('parameters and param_names have '
                                     'inconsistent lengths')
            parameters = dict(zip(param_names, parameters))

        # populate the parameters array
        params = np.zeros(len(param_names))
        errors = 0
        for (key, value) in parameters.items():
            try:
                idx = param_names.index(key.upper())
            except ValueError:
                errors += 1
                logging.error('{0}: unrecognized parameter '
                              'for model {1}'.format(key, self.name))
                continue
            try:
                params[idx] = float(value)
            except ValueError:
                errors += 1
                logging.error('parameter {0} must be a float'.format(key))

        if errors:
            raise MatmodlabError('stopping due to previous errors')

        self.parameter_names = [s.upper() for s in param_names]
        self.iparray = np.array(params)

        # --- set defaults
        self.sqa_stiff = kwargs.get('sqa_stiff', environ.sqa_stiff)
        self.num_stiff = kwargs.get('num_stiff', environ.num_stiff)
        self.iwarn_stiff = 0
        self.visco_model = None
        self.xpan = None
        self.trs_model = None
        self.initial_temp = kwargs.get('initial_temp', DEFAULT_TEMP)

        # parameter arrays
        self.iparams = keyarray(self.parameter_names, self.iparray)
        self.params = keyarray(self.parameter_names, self.iparray)

        # import the material library
        self._import_lib(libname=kwargs.get('libname'))

        # --- setup and initialize the model
        try:
            item = self.setup(**kwargs)
        except Exception as e:
            s = 'failed to setup material model with the following exception:'
            s += '\n' + ' '.join(e.args)
            raise MatmodlabError(s)

        if item is None:
            sdv_keys, sdv_vals = [], []
        else:
            try:
                sdv_keys, sdv_vals = item
            except ValueError:
                raise MatmodlabError('Expected the material setup to return '
                                     'only the sdv keys and values')

        self.num_sdv = len(sdv_keys)
        if len(sdv_vals) != len(sdv_keys):
            raise MatmodlabError('len(sdv_values) != len(sdv_keys)')
        self.sdv_keys = [s for s in sdv_keys]
        self.initial_sdv = np.array(sdv_vals, dtype=np.float64)

        # call model with zero strain rate to get initial jacobian
        time, dtime = 0, 1
        temp, dtemp = self.initial_temp, 0.
        kappa = 0
        F0, F = Eye(9), Eye(9)
        stress, stran, d = np.zeros(6), np.zeros(6), np.zeros(6)
        elec_field = np.zeros(3)
        ddsdde = self.compute_updated_state(time,
                                            dtime,
                                            temp,
                                            dtemp,
                                            kappa,
                                            F0,
                                            F,
                                            stran,
                                            d,
                                            elec_field,
                                            stress,
                                            self.initial_sdv,
                                            disp=2)

        # check that stiffness is positive definite
        try:
            cholesky(ddsdde)
        except LinAlgError:
            raise MatmodlabError(
                'initial elastic stiffness not positive definite')

        # property completions
        b = self.completions_map()

        # Check if None or empty dict
        if b is not None and b:
            a = self.params
        else:
            # Bulk modulus
            K = np.sum(ddsdde[:3, :3], axis=None) / 9.

            # Shear modulus
            # pure shear
            G = []
            G.append((2. * ddsdde[0, 0] - 3. * ddsdde[0, 1] - ddsdde[0, 2] +
                      ddsdde[1, 1] + ddsdde[1, 2]) / 6.)
            G.append((2. * ddsdde[1, 1] - 3. * ddsdde[1, 2] + ddsdde[2, 2] -
                      ddsdde[0, 1] + ddsdde[0, 2]) / 6.)
            G.append((2 * ddsdde[0, 0] - ddsdde[0, 1] - 3. * ddsdde[0, 2] +
                      ddsdde[1, 2] + ddsdde[2, 2]) / 6.)
            # simple shear
            G.extend([ddsdde[3, 3], ddsdde[4, 4], ddsdde[5, 5]])
            a = np.array([K, np.average(G)])
            b = {'K': 0, 'G': 1}

        # calculate all elastic constants
        self.completions = complete_properties(a, b)

        self.J0 = np.zeros((6, 6))
        K3 = 3. * self.completions['K']
        G = self.completions['G']
        G2 = 2. * G
        Lam = (K3 - G2) / 3.

        # set diagonal
        self.J0[np.ix_(range(3), range(3))] = Lam
        self.J0[range(3), range(3)] += G2
        self.J0[range(3, 6), range(3, 6)] = G
    def fix_singular_matrix(self,
                            singular_mat,
                            verbosity=False,
                            what2fix=None,
                            val_min_deter=1e-200,
                            val_max_cond=1e9):

        assert singular_mat.ndim == 2
        assert singular_mat.shape[0] == singular_mat.shape[1]

        # Corner case:
        cond_num = la.cond(singular_mat)
        deter = la.det(singular_mat)
        # val_min_deter = 1e-320
        # val_min_deter = 1e-200
        # val_max_cond = 1e12
        # val_max_cond = 1e9

        # Check positive definiteness:
        chol_ok = True
        try:
            la.cholesky(singular_mat)
        except Exception as inst:
            if verbosity == True:
                print(type(inst), inst.args)
            chol_ok = False

        # Check log(det)
        log_det_ok = True
        try:
            with warnings.catch_warnings():
                warnings.filterwarnings('error')
                np.log(deter)
        except Exception as inst:
            if verbosity == True:
                print(type(inst), inst.args)
            log_det_ok = False

        if cond_num <= val_max_cond and deter > val_min_deter and chol_ok == True and log_det_ok == True:
            return singular_mat
        else:
            pass
            # print("@GaussianTools.fix_singular_matrix(): singular_mat needs to be fixed")
            # if what2fix is not None: print("what2fix:",what2fix)

        # Get the order of magnitude of the largest eigenvalue in singular_mat, assuming all eigenvalues are positive:
        eigs_real = np.real(la.eigvals(singular_mat))
        largest_eig = np.amax(eigs_real)
        if largest_eig < 1e-310:
            max_ord = np.floor(np.log10(1e-310))
        else:
            max_ord = np.ceil(np.log10(largest_eig))

        # print("largest_eig: ",largest_eig)
        # print("max_ord: ",max_ord)

        # Get the order of magnitude of the smallest eigenvalue in singular_mat, assuming all eigenvalues are positive:
        smallest_eig = np.amin(eigs_real)
        if smallest_eig < 1e-310:
            min_ord = np.floor(np.log10(1e-310))
        else:
            min_ord = np.floor(np.log10(np.abs(smallest_eig)))

        # Initial factor:
        fac_init = min_ord * 2.

        if verbosity == True:
            print(
                "\n[VERBOSITY]: @GaussianTools.fix_singular_matrix(): singular_mat needs to be fixed"
            )
            print("cond_num:", cond_num)
            print("min_ord:", min_ord)
            print("max_ord:", max_ord)
            print("chol_ok:", chol_ok)
            print("log_det_ok:", log_det_ok)
            print("Before update:")
            print("==============")
            print("fac_init:", fac_init)
            print("order cond_num:", np.floor(np.log10(cond_num)))
            print("deter:", deter)
            print("eig:", la.eigvals(singular_mat))

        # Fix the matrix:
        Id = np.eye(singular_mat.shape[0])
        singular_mat_new = singular_mat
        c = 0
        singular = True
        fac = 10**(fac_init)
        while singular == True and fac_init + c < max_ord:

            # New factor:
            fac = 10**(fac_init + c)
            singular_mat_new[:, :] = singular_mat + fac * Id

            # Look for errors:
            try:
                with warnings.catch_warnings():
                    warnings.filterwarnings('error')
                    la.cholesky(singular_mat_new)
                    assert la.det(singular_mat_new) > val_min_deter
                    np.log(la.det(singular_mat_new))
                    assert la.cond(singular_mat_new) <= val_max_cond
            except Exception as inst:
                if verbosity == True:
                    print(type(inst), inst.args)
                c += 1
            else:
                singular = False

        if verbosity == True:
            print("After update:")
            print("=============")
            print("fac:", fac)
            print("order cond_num:",
                  np.floor(np.log10(la.cond(singular_mat_new))))
            print("deter:", la.det(singular_mat_new))
            print("eig:", la.eigvals(singular_mat_new))

        if singular == True:
            # pdb.set_trace()
            # raise ValueError("Matrix could not be fixed. Something is really wrong here...")
            # warnings.warn("Matrix could not be fixed. Something is really wrong here...")
            print(
                "Matrix could not be fixed. Something is really wrong here..."
            )  # Highest permission

        return singular_mat_new
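The method above implements the usual "jitter" fix: add a scaled identity to the diagonal until the matrix passes the Cholesky, determinant, and condition-number checks. A minimal standalone illustration of the idea (plain numpy, independent of the class above):

import numpy as np
import numpy.linalg as la

K = np.ones((3, 3))  # rank-1, so cholesky fails
for fac in [0.0, 1e-10, 1e-8, 1e-6]:
    try:
        la.cholesky(K + fac * np.eye(3))
        print('fixed with jitter', fac)
        break
    except la.LinAlgError:
        continue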
Example #56
    def calc(self, theta, with_gradient=True, check_stable=True, debug=False):
        """
        Model X = mu + Phi Xlags + e + Theta elags
        data is n *k dim, n = T+p. k = k
        """
        k = self.X.shape[1]
        if debug:
            print('theta=%s' % ','.join(theta.astype(str).tolist()))
        q = theta.shape[0]
        theta_1 = concatenate([np.array([1.]), theta])
        t_roots = roots(flip(theta_1))

        if (np.sum(np.abs(theta)) != 0) and\
           (check_stable and (np.where(np.abs(t_roots) < 1)[0].shape[0] > 0)):
            raise ValueError('theta is not invertible')
        n = self.X.shape[0]
        p = self.p
        T = n - p
        self.n = n
        self.k = self.X.shape[1]
        self.Tklog = T * k / 2.0 * log(2 * pi)
        self.Theta = theta
        self.setThetaLbd(theta, T, q, p)
        # compute for the whole length of $X, but use only from p+1 to n
        self.XThetaFull = mat_convol(self.X, self.inv_Theta)
        self.XThetaT = AdjustConvol(self.XThetaFull, self.X, self.inv_Theta, T,
                                    p)
        self.cholKb = cholesky(self.Kbar).T
        # compute for the whole length of $X, but use only from p+1 to n
        smallE = np.where(np.abs(diag(self.cholKb)) < m_SMALL_ERR)[0]
        di = np.diag_indices(self.cholKb.shape[0])[0][smallE]
        self.cholKb[(di, di)] = m_SMALL_ERR
        self.covXTheta = mult_by_K1(self.Lbd, self.cholKb, self.XThetaT)
        self.detChol = prod(diagonal(self.cholKb))
        if self.trend:
            self.hSize = k * p + 1  # size of lag matrix
            self.bhidx = 1  # begin index of the X, as opposed to the trend
        else:
            self.hSize = k * p
            self.bhidx = 0
        self.has_lag = self.trend or (p != 0)
        if not self.has_lag:
            self.Omega = self.covXTheta / T
            self.detOmega = det(self.Omega)
            if (self.detOmega <= 0):
                self.LLK = m_BIG_M_LLK
            else:
                self.LLK = self.Tklog + T/2.*log(self.detOmega) +\
                    k/2.*log(self.detChol)
            if debug:
                print(self.Omega)
                print(self.LLK)
            if with_gradient:
                self.calcGrLLK(T, k, p, q)
            # attr(wEnv$LLK, 'gradient') = wEnv$grLLK
            return self.LLK

        self.XThetaLag = full((T, self.hSize), nan)
        if self.trend:
            self.XThetaLag[:, 0] = mat_convol(ones((T, 1)),
                                              self.inv_Theta).reshape(-1)
        elif p == 0:
            self.XThetaLag[:, :] = zeros((T, self.hSize))

        if p > 0:
            for i in range(p):
                self.XThetaLag[:, self.bhidx+i*k:self.bhidx+i*k+k] =\
                    AdjustConvol(self.XThetaFull,
                                 self.X, self.inv_Theta, T, p-i-1)

        self.covXLag = mult_by_K1(self.Lbd, self.cholKb, self.XThetaLag)
        self.covXLagXTheta = mult_by_K2(self.Lbd, self.cholKb, self.XThetaLag,
                                        self.XThetaT)

        # same as
        # MM = lm.fit(XThetaLag,Xq[(p+1):n,])
        # should recover back sigma.
        self.Phi = solve(self.covXLag, self.covXLagXTheta)
        self.Omega = (self.covXTheta - (self.covXLagXTheta.T @ self.Phi)) / T
        self.detChol = prod(diag(self.cholKb))
        self.detOmega = det(self.Omega)
        if self.detOmega <= 0:
            self.LLK = m_BIG_M_LLK
        else:
            self.LLK = self.Tklog + T / 2 * log(self.detOmega) + k * log(
                self.detChol)
        if with_gradient:
            self.calcGrLLK(T, k, p, q)

        return self.LLK
Example #57
File: psd.py Project: yig/pymanopt
 def log(self, x, y):
     c = la.cholesky(x)
     c_inv = la.inv(c)
     l = multilog(multiprod(multiprod(c_inv, y), multitransp(c_inv)),
                  pos_def=True)
     return multiprod(multiprod(c, l), multitransp(c))
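For context, a standard identity not stated in the original file: writing $x = c c^{\top}$ for the Cholesky factor $c$, this method computes the affine-invariant log map $\log_x(y) = c \,\log\!\left(c^{-1} y\, c^{-\top}\right) c^{\top}$, which coincides with the more familiar form $x^{1/2}\,\log\!\left(x^{-1/2}\, y\, x^{-1/2}\right) x^{1/2}$.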
Example #58
    def continue_cholesky(self,
                          x,
                          x_old,
                          chol_dict_old,
                          apply_pivot=True,
                          observed=True,
                          nugget=None,
                          regularize=True,
                          assume_full_rank=False,
                          rank_limit=0):
        """

        U = C.continue_cholesky(x, x_old, chol_dict_old[, observed=True, nugget=None, 
            rank_limit=0])


        returns {'pivots': piv, 'U': U}


        Computes incomplete Cholesky factorization of self(z,z), without
        actually evaluating the matrix first. Here z is the concatenation of x
        and x_old. Assumes the Cholesky factorization of self(x_old, x_old) has
        already been computed. 


        :Arguments:

            -   `x`: The input array on which to evaluate the Cholesky factorization.

            -   `x_old`: The input array on which the Cholesky factorization has been
                computed.

            -   `chol_dict_old`: A dictionary with keys ['pivots', 'U']. Would be the
                output of either this method or C.cholesky().

            -   `apply_pivot`: A flag. If it's set to 'True', it returns a
                matrix U (not necessarily triangular) such that U.T*U=C(x,x).
                If it's set to 'False', the return value is a dictionary.
                Item 'pivots' is a vector of pivots, and item 'U' is an
                upper-triangular matrix (not necessarily square) such that
                U[:,argsort(piv)].T * U[:,argsort(piv)] = C(x,x).

            -   `observed`: If 'True', any observations are taken into account
                when computing the Cholesky factor. If not, the unobserved
                version of self is used.

            -   `nugget`: The 'nugget' parameter, which will essentially be
                added to the diagonal of C(x,x) before Cholesky factorizing.
                
            -   `rank_limit`: If rank_limit > 0, the factor will have at most 
                rank_limit rows.
        """

        if regularize:
            x = regularize_array(x)

        # Concatenation of the old points and new points.
        xtot = vstack((x_old, x))

        # Extract information from chol_dict_old.
        U_old = chol_dict_old['U']
        m_old = U_old.shape[0]
        piv_old = chol_dict_old['pivots']

        # Number of old points.
        N_old = x_old.shape[0]

        # Number of new points.
        N_new = x.shape[0]
        if rank_limit == 0:
            m_new_max = N_new
        else:
            m_new_max = min(N_new, max(0, rank_limit - m_old))

        # get-row function
        def rowfun(i, xpiv, rowvec):
            """
            A function that can be used to overwrite an input array with superdiagonal rows.
            """
            rowvec[i:] = self.__call__(x=xpiv[i - 1, :].reshape(1, -1),
                                       y=xpiv[i:, :],
                                       regularize=False,
                                       observed=observed)

        # diagonal
        diag = self.__call__(x, y=None, regularize=False, observed=observed)

        # not really implemented yet.
        if nugget is not None:
            diag += nugget.ravel()

        # Arrange U for input to ichol. See documentation.
        U = asmatrix(
            zeros((m_new_max + m_old, N_old + N_new), dtype=float, order='F'))
        U[:m_old, :m_old] = U_old[:, :m_old]
        U[:m_old, N_new + m_old:] = U_old[:, m_old:]

        offdiag = self.__call__(x=x_old[piv_old[:m_old], :],
                                y=x,
                                observed=observed,
                                regularize=False)
        trisolve(U_old[:, :m_old], offdiag, uplo='U', transa='T', inplace=True)
        U[:m_old, m_old:N_new + m_old] = offdiag

        # Initialize pivot vector:
        # [old_posdef_pivots  new_pivots  old_singular_pivots]
        #   - old_posdef_pivots are the indices of the rows that made it into the Cholesky factor so far.
        #   - old_singular_pivots are the indices of the rows that haven't made it into the Cholesky factor so far.
        #   - new_pivots are the indices of the rows that are going to be incorporated now.
        piv = zeros(N_new + N_old, dtype=int)
        piv[:m_old] = piv_old[:m_old]
        piv[N_new + m_old:] = piv_old[m_old:]
        piv[m_old:N_new + m_old] = arange(N_new) + N_old

        # ============================================
        # = Call to Fortran function ichol_continue. =
        # ============================================

        # Early return if rank is all used up.
        if m_new_max > 0:

            # ============================================
            # = Call to Fortran function ichol_continue. =
            # ============================================
            if not assume_full_rank:
                m, piv = ichol_continue(U,
                                        diag=diag,
                                        reltol=self.relative_precision,
                                        rowfun=rowfun,
                                        piv=piv,
                                        x=xtot[piv, :],
                                        mold=m_old)
            else:
                m = m_old + N_new
                C_eval = self.__call__(x, x, observed=True, regularize=False)
                U2 = cholesky(C_eval).T
                U[m_old:, m_old:N_new + m_old] = U2

                if m_old < N_old:
                    offdiag2 = self.__call__(x=x,
                                             y=x_old[piv_old[m_old:]],
                                             observed=observed,
                                             regularize=False)
                    trisolve(U2, offdiag2, uplo='U', transa='T', inplace=True)
                    U[m_old:, N_new + m_old:] = offdiag2

        else:
            m = m_old

        # Arrange output matrix and return.
        if m < 0:
            raise ValueError, 'Matrix does not appear positive semidefinite.'

        if not apply_pivot:
            # Useful for self.observe. U is upper triangular.
            U = U[:m, :]
            if assume_full_rank:
                return {'pivots': piv, 'U': U, 'C_eval': C_eval, 'U_new': U2}
            else:
                return {'pivots': piv, 'U': U}

        else:
            # Useful for the user. U.T * U = C(x,x).
            return U[:m, argsort(piv)]
Example #59
print("x^Ty: ")
xt = np.transpose(x)
print(xt.dot(y))

# ## 4.2 Cholesky decomposition
# $ A = {LL}^T $

# In[10]:

A = np.vstack([[3, 2, 2], [2, 3, 2], [2, 2, 3]])
print("A:")
print(A)

print("Cholesky(A): L")
print(npl.cholesky(A))

print("L^T")
print(np.transpose(npl.cholesky(A)))

# ## 4.3 Eigendecomposition
# 1) eigenvalues & eigenvectors of A
#
# $Au = \lambda u$

# In[11]:

A = np.vstack([[4, 2], [1, 3]])
print("A:")
print(A, "\n")
Example #60
 def __init__(self, mu=array([0, 0]), Sigma=eye(2), is_cholesky=False, ell=None):
     Distribution.__init__(self, len(Sigma))
     
     assert(len(shape(mu)) == 1)
     assert(max(shape(Sigma)) == len(mu))
     self.mu = mu
     self.ell = ell
     if is_cholesky: 
         self.L = Sigma
         if ell is None:
             assert(shape(Sigma)[0] == shape(Sigma)[1])
         else:
             assert(shape(Sigma)[1] == ell)
     else: 
         assert(shape(Sigma)[0] == shape(Sigma)[1])
         if ell is not None:
             self.L, _, _ = MatrixTools.low_rank_approx(Sigma, ell)
             self.L = self.L.T
             assert(shape(self.L)[1] == ell)
         else:
             try:
                 self.L = cholesky(Sigma)
             except LinAlgError:
                 # crude fallback for nearly-PSD matrices: add a small jitter to
                 # the diagonal to correct for rounding errors
                 self.L = cholesky(Sigma + eye(len(Sigma)) * 1e-5)