Example #1
def expec_a(self):
        for f in range(self.F):
            innovation = self.x[:, f:f+1] - (self.y * self.mu0_a + self.c[:, f:f+1] + self.e[:, f:f+1]) # observation minus prior over forecasters' predictions 
            K = self.s_a[f] * self.K       
            
            y_diag = np.diag(self.y.reshape(-1))
            
            var_y = np.diag(np.diag(self.cov_y))
            var_c = np.diag(np.diag(self.cov_c[f]))
            var_e = np.diag(np.diag(self.cov_e[f]))
            
            S_a = y_diag.dot(K).dot(y_diag.T) + self.mu0_a**2 * var_y + var_c + var_e
            La = cholesky(S_a, lower=True, overwrite_a=True, check_finite=False)
            B = solve_triangular(La, innovation, lower=True, overwrite_b=True, check_finite=False)
            A = solve_triangular(La.T, B, overwrite_b=True, check_finite=False)           
            V = solve_triangular(La, y_diag.dot(K), check_finite=False, lower=True)
            self.a[:, f] = self.mu0_a + K.dot(y_diag).dot(A).reshape(-1)
            self.cov_a[f] = K - V.T.dot(V)

            rate0_s = self.s0_a
            shape_s = 1 + 0.5 * self.N 
            af = self.a[:, f][:, np.newaxis]
            
            var_a = np.diag(np.diag(self.cov_a[f]))
            B = solve_triangular(self.L_K, af.dot(af.T).T + y_diag.dot(var_a).dot(y_diag.T).T, lower=True, overwrite_b=True)
            A = solve_triangular(self.L_K.T, B, overwrite_b=True)            
            
            rate_s = rate0_s + 0.5 * np.trace(A)
Example #2
    def varN(self,x,n,grad=False):
        temp=self._k.K(np.array(x).reshape((1,self.n1)))
        tempN=self._numberTraining+n
        sigmaVec=np.zeros((tempN,1))
        for i in xrange(tempN):
            sigmaVec[i,0]=self._k.K(np.array(x).reshape((1,self.n1)),self._Xhist[i:i+1,:])[:,0]
        A=self._k.A(self._Xhist[0:tempN,:],noise=self._noiseHist[0:tempN])
        L=np.linalg.cholesky(A)
        temp3=linalg.solve_triangular(L,sigmaVec,lower=True)
        temp2=np.dot(temp3.T,temp3)
        temp2=temp-temp2
        if grad==False:
            return temp2
        else:
            gradi=np.zeros(self.n1)
            x=np.array(x).reshape((1,self.n1))

            gradX=self.gradXKern(x,n,self)
            #gradX=np.zeros((n,self._n1))
            for j in xrange(self.n1):
              #  for i in xrange(n):
                  #  gradX[i,j]=self._k.K(x,self._X[i,:].reshape((1,self._n1)))*(2.0*self._alpha1[j]*(x[0,j]-self._X[i,j]))
                temp5=linalg.solve_triangular(L,gradX[:,j].T,lower=True)
                gradi[j]=np.dot(temp5.T,temp3)
            gradVar=-2.0*gradi
            return temp2,gradVar
Example #3
 def solve(a, y, negative=True):
     """
     A wrapper for solving the linear system 'ax = y' using:
         1) np.linalg.solve (if scipy is not available)
         2) Cholesky decomposition (if scipy is available and a is not sparse)
         3) spsolve (if scipy is available and a is sparse)
     If scipy is available and a is dense but not positive definite, a LinAlgError is raised.
     :param a        : 'a'
     :param y        : 'y'
     :param negative : If True, we'll solve '-ax = y' instead
     :return         : 'x'
     """
     if scipy_flag:
         if issparse(a):
             if negative:
                 return sparse_lin.spsolve(-a, y)
             return sparse_lin.spsolve(a, y)
         l = linalg.cholesky(a, lower=True)
         if negative:
             z = linalg.solve_triangular(-l, y, lower=True)
         else:
             z = linalg.solve_triangular(l, y, lower=True)
         return linalg.solve_triangular(l.T, z)
     if negative:
         return np.linalg.solve(-a, y)
     return np.linalg.solve(a, y)
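Most of the examples on this page reduce to the same pattern: factor a symmetric positive definite matrix once with a Cholesky decomposition, then call solve_triangular twice (a forward and a back substitution). A minimal, self-contained sketch of that pattern, using a random SPD matrix built only for illustration:

import numpy as np
from scipy.linalg import cholesky, solve_triangular

rng = np.random.default_rng(0)
M = rng.standard_normal((5, 5))
A = M @ M.T + 5 * np.eye(5)                 # symmetric positive definite by construction
b = rng.standard_normal(5)

L = cholesky(A, lower=True)                 # A = L @ L.T
z = solve_triangular(L, b, lower=True)      # forward substitution: L z = b
x = solve_triangular(L.T, z, lower=False)   # back substitution:    L.T x = z
assert np.allclose(A @ x, b)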
Example #4
def cholesky_solver_least_squares(part_one, part_two):
    '''
    Solves the least squares problem using the Cholesky decomposition.
    
    Parameters:
    -----------
    
    part_one: numpy array of size 'm x m', 
              Equals X.T * X
    part_two: numpy array of size 'm x 1'
              Equals X.T * Y
              
    Returns:
    --------
    Theta: numpy array of size 'm x 1'
              Vector of coefficients
    
    '''
    # R*R.T*Theta = part_two
    R = np.linalg.cholesky(part_one)
    # R*Z = part_two
    Z     = solve_triangular(R,part_two, check_finite = False, lower = True)
    # R.T*Theta = Z
    Theta = solve_triangular(R.T,Z, check_finite = False, lower = False)
    return Theta
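A quick usage sketch for the helper above (assuming the function and its solve_triangular import are in scope), checking the result against np.linalg.lstsq on made-up data:

import numpy as np

rng = np.random.default_rng(1)
X = rng.standard_normal((50, 3))
y = X @ np.array([1.0, -2.0, 0.5]) + 0.01 * rng.standard_normal(50)

theta = cholesky_solver_least_squares(X.T @ X, X.T @ y)
reference = np.linalg.lstsq(X, y, rcond=None)[0]
assert np.allclose(theta, reference, atol=1e-6)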
Example #5
	def predict(self, Xq, predictUncertainty = True, predictObservations = False):

		# If the model has not learned, we should not predict anything
		self.requireLearned()

		# Set the learned result
		self._kernel.theta = self.kernelHyperparams.copy()

		# Compute the prediction using the standard Gaussian Process Prediction Algorithm
		alpha = la.solve_triangular(self.L.T, la.solve_triangular(self.L, self.y, lower = True, check_finite = False), check_finite = False)
		Kq = self._kernel(self.X, Xq)
		fqExp = np.dot(Kq.T, alpha) + self.yMean

		# Compute the covariance matrix using the standard Gaussian Process Prediction Algorithm if requested
		if predictUncertainty:

			v = la.solve_triangular(self.L, Kq, lower = True, check_finite = False)
			Kqq = self._kernel(Xq, Xq)
			fqVar = Kqq - np.dot(v.T, v)

		# Depending on if the user wants the one for latent function or output function, return the correct expected values and covariance matrix
		if predictUncertainty:
			if predictObservations:
				return(fqExp, fqVar + self.sigma**2 * np.eye(fqVar.shape[0]))
			else:
				return(fqExp, fqVar)
		else:
			return(fqExp)
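The predict method above follows the textbook Gaussian process prediction equations: alpha = K^{-1} y via two triangular solves, predictive mean Kq^T alpha, and predictive covariance Kqq - V^T V. A compact self-contained sketch with a made-up RBF kernel and toy data (all names below are illustrative, not part of the class above):

import numpy as np
from scipy.linalg import cholesky, solve_triangular

def rbf(A, B, ell=1.0):
    # toy squared-exponential kernel, used only for this sketch
    d2 = ((A[:, None, :] - B[None, :, :]) ** 2).sum(-1)
    return np.exp(-0.5 * d2 / ell ** 2)

rng = np.random.default_rng(2)
X = rng.uniform(-3, 3, size=(20, 1))
y = np.sin(X[:, 0]) + 0.1 * rng.standard_normal(20)
Xq = np.linspace(-3, 3, 50)[:, None]
sigma = 0.1

L = cholesky(rbf(X, X) + sigma ** 2 * np.eye(len(X)), lower=True)
alpha = solve_triangular(L.T, solve_triangular(L, y, lower=True))
fq_mean = rbf(X, Xq).T @ alpha                      # predictive mean
v = solve_triangular(L, rbf(X, Xq), lower=True)
fq_cov = rbf(Xq, Xq) - v.T @ v                      # predictive covariance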
Example #6
    def fit(self, theta_start=[.1, .5, 0.], method='SLSQP', numba=True):
        """Fit DECO model to the data.

        """
        self.numba = numba
        self.param = ParamDCC(ndim=self.data.ndim)
        self.standardize_returns()
        self.param.corr_target = np.atleast_2d(np.corrcoef(self.data.std_ret.T))
        neg_ret = self.data.std_ret.T.copy()
        neg_ret[neg_ret > 0] = 0
        self.param.corr_neg_target = np.atleast_2d(np.corrcoef(neg_ret))

        # Compute lmbd to check for stationarity of Q
        factor, lower = scl.cho_factor(self.param.corr_target, lower=True)
        sandwich = scl.solve_triangular(factor, self.param.corr_neg_target,
                                        lower=lower)
        sandwich = scl.solve_triangular(factor, sandwich.T,
                                        lower=lower)
        self.lmbd = scl.eigvals(sandwich).real.max()

        options = {'disp': False, 'maxiter': int(1e6)}

        opt_out = sco.minimize(self.likelihood, theta_start,
                               method=method, options=options)

        self.param.abcorr = opt_out.x
        self.data.rho_series = pd.Series(self.data.rho_series,
                                         index=self.data.ret.index)
        self.estimate_innov()
        return opt_out
Example #7
def cholesky_omp(D, x, m, eps=None):
    if eps == None:
        stopping_condition = lambda: it == m
    else:
        stopping_condition = lambda: np.inner(residual, residual) <= eps
    alpha = np.dot(x,D)
    it = 1
    lam = np.abs(np.dot(x,D)).argmax()
    idx = [lam]
    L = np.ones((1,1))
    gamma = linalg.lstsq(D[:,idx],x)[0]
    residual = x - np.dot(D[:,idx],gamma)
    
    while not stopping_condition():
        lam = np.abs(np.dot(residual, D)).argmax()
        w = linalg.solve_triangular(L, np.dot(D[:,idx].T, D[:,lam]),
                lower=True, unit_diagonal=True)
        L = np.r_[np.c_[L, np.zeros(len(L))],
                np.atleast_2d(np.append(w, np.sqrt(1-np.dot(w.T, w))))]
        idx.append(lam)
        it +=1
        Ltc = linalg.solve_triangular(L, alpha[idx], lower=True)
        gamma = linalg.solve_triangular(L, Ltc, trans=1, lower=True)
        residual = x - np.dot(D[:, idx], gamma)
    
    return gamma, idx
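A small usage sketch for cholesky_omp above (assuming the function and its numpy/scipy imports are in scope). Note that the unit_diagonal=True solves rely on the dictionary atoms being normalised to unit norm:

import numpy as np

rng = np.random.default_rng(3)
D = rng.standard_normal((64, 128))
D /= np.linalg.norm(D, axis=0)                  # unit-norm atoms
true_support = [5, 40, 77]
x = D[:, true_support] @ np.array([1.5, -2.0, 0.7])

gamma, idx = cholesky_omp(D, x, m=3)
print(sorted(idx), gamma)                       # should recover the planted support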
Example #8
def log_gaussian_pdf(x, mu=None, Sigma=None, is_cholesky=False, compute_grad=False):
    if mu is None:
        mu = np.zeros(len(x))
    if Sigma is None:
        Sigma = np.eye(len(mu))
    
    if is_cholesky is False:
        L = np.linalg.cholesky(Sigma)
    else:
        L = Sigma
    
    assert len(x) == Sigma.shape[0]
    assert len(x) == Sigma.shape[1]
    assert len(x) == len(mu)
    
    # solve y=K^(-1)x = L^(-T)L^(-1)x
    x = np.array(x - mu)
    y = solve_triangular(L, x.T, lower=True)
    y = solve_triangular(L.T, y, lower=False)
    
    if not compute_grad:
        log_determinant_part = -np.sum(np.log(np.diag(L)))
        quadratic_part = -0.5 * x.dot(y)
        const_part = -0.5 * len(L) * np.log(2 * np.pi)
        
        return const_part + log_determinant_part + quadratic_part
    else:
        return -y
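A quick sanity check of log_gaussian_pdf above (assuming it and its solve_triangular import are in scope) against scipy.stats.multivariate_normal on a random SPD covariance:

import numpy as np
from scipy.stats import multivariate_normal

rng = np.random.default_rng(4)
x = rng.standard_normal(3)
mu = rng.standard_normal(3)
M = rng.standard_normal((3, 3))
Sigma = M @ M.T + np.eye(3)                      # random SPD covariance

lp = log_gaussian_pdf(x, mu, Sigma)
assert np.allclose(lp, multivariate_normal(mean=mu, cov=Sigma).logpdf(x))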
Example #9
    def muN(self,x,n,L,X,temp1,kern,grad=False,onlyGrad=False):
        muStart=kern.mu
        x=np.array(x).reshape((1,self.n1))
        tempN=self._numberTraining+n
        B=np.zeros([1,tempN])
        
        for i in xrange(tempN):
            B[:,i]=self._k.K(x,X[i:i+1,:])

        temp2=linalg.solve_triangular(L,B.T,lower=True)
        
        if grad:
            gradX=self.gradXKern(x,n,self._k,self._numberTraining,X,self.n1)
            gradi=np.zeros(self.n1)
            
            for j in xrange(self.n1):
                temp5=linalg.solve_triangular(L,gradX[:,j].T,lower=True)
                gradi[j]=np.dot(temp5,temp1)
            
        
        if onlyGrad:
            return gradi
        
        a=muStart+np.dot(temp2.T,temp1)
        if grad==False:
            return a

        return a,gradi
Example #10
def _batch_omp_step(G, alpha_0, m, eps_0=None, eps=None):
    idx = []
    L = np.ones((1,1))
    alpha = alpha_0
    eps_curr = eps_0
    delta = 0
    it = 0
    if eps == None:
        stopping_condition = lambda: it == m
    else:
        stopping_condition = lambda: eps_curr <=eps

    while not stopping_condition():
        lam = np.abs(alpha).argmax()
        if len(idx) > 0:
            w = linalg.solve_triangular(L, G[idx, lam],
                                        lower = True, unit_diagonal=True)
            L = np.r_[np.c_[L, np.zeros(len(L))],
                      np.atleast_2d(np.append(w, np.sqrt(1-np.inner(w,w))))]
        idx.append(lam)
        it +=1
        Ltc = linalg.solve_triangular(L, alpha_0[idx], lower=True)
        gamma = linalg.solve_triangular(L, Ltc, trans=1, lower=True)
        beta = np.dot(G[:, idx], gamma)
        alpha = alpha_0 - beta
        if eps != None:
            eps_curr += delta
            delta = np.inner(gamma, beta[idx])
            eps_curr -= delta
    return gamma, idx
Example #11
 def muN(self,x,n,data,L,temp1,grad=True,onlyGradient=False):
     tempN=self._numberTraining+n
     x=np.array(x).reshape((1,self.n1))
     if onlyGradient:
         gradX=self.gradXKern(x,n,self._k,self._numberTraining,
                              data.Xhist[0:tempN,:],self.n1)
         gradi=np.zeros(self.n1)
         for j in xrange(self.n1):
             temp2=linalg.solve_triangular(L,gradX[:,j].T,lower=True)
             gradi[j]=np.dot(temp2.T,temp1)
         return gradi
         
     X=data.Xhist[0:tempN,:]
     B=np.zeros([1,tempN])
     muStart=self._k.mu
     
     for i in xrange(tempN):
         B[:,i]=self._k.K(x,X[i:i+1,:])
     temp2=linalg.solve_triangular(L,B.T,lower=True)
     a=muStart+np.dot(temp2.T,temp1)
     if grad==False:
         return a
     
     gradX=self.gradXKern(x,n,self._k,self._numberTraining,
                          data.Xhist[0:tempN,:],self.n1)
     gradi=np.zeros(self.n1)
     for j in xrange(self.n1):
         temp2=linalg.solve_triangular(L,gradX[:,j].T,lower=True)
         gradi[j]=np.dot(temp2.T,temp1)
     return a,gradi
Example #12
 def muN(self,x,n,grad=False):
     x=np.array(x)
     m=1
     tempN=self._numberTraining+n
     X=self._Xhist[0:tempN,:]
     A=self._k.A(self._Xhist[0:tempN,:],noise=self._noiseHist[0:tempN])
     L=np.linalg.cholesky(A)
     x=np.array(x).reshape((1,self.n1))
     B=np.zeros([m,tempN])
     
     for i in xrange(tempN):
         B[:,i]=self._k.K(x,X[i:i+1,:])
         
     y=self._yHist[0:tempN,:]
     temp2=linalg.solve_triangular(L,B.T,lower=True)
     muStart=self._k.mu
     temp1=linalg.solve_triangular(L,np.array(y)-muStart,lower=True)
     a=muStart+np.dot(temp2.T,temp1)
     if grad==False:
         return a
     x=np.array(x).reshape((1,self.n1))
     gradX=self.gradXKern(x,n,self)
     gradi=np.zeros(self.n1)
     temp3=linalg.solve_triangular(L,y-muStart,lower=True)
     
     for j in xrange(self.n1):
         temp2=linalg.solve_triangular(L,gradX[:,j].T,lower=True)
         gradi[j]=muStart+np.dot(temp2.T,temp3)
     return a,gradi
Example #13
 def power_iteration(self,x0,tol=1.e-10,max_it=1000, verbose = False):
     # P, L, U = sp.linalg.lu(A)
     L, U = self.lunopiv(self.matrices.L,1.e-6)
     #
     error = 1.
     it = 0
     #
     phi_new = x0
     lambda_new = np.linalg.norm(self.matrices.M.dot(phi_new))
     #
     while (error > tol) and (it < max_it):
         # setting the variable to the old name
         it = it+1
         phi_old = phi_new
         lambda_old = lambda_new
         #
         #phi_new = U\(L\(B*phi_old))
         tmp = self.matrices.M.dot(phi_old)
         sp_la.solve_triangular(L, tmp, lower=True,overwrite_b=True)
         sp_la.solve_triangular(U, tmp, lower=False,overwrite_b=True)
         phi_new = tmp
         #lambda_new = (phi_new*tmp)/(phi_old*tmp)
         #
         lambda_new = np.linalg.norm(phi_new)
         phi_new = phi_new/lambda_new
         #
         phi_error = np.linalg.norm(phi_new-phi_old)
         lambda_error = abs(lambda_new - lambda_old)/lambda_new
         error = max(phi_error,lambda_error)
         if (it%5 == 0 and verbose):
             print "iter = ", it, ";  lambda = ",lambda_new, "; error = ", error
     print "iter = ", it, ";  lambda = ",lambda_new, "; error = ", error
     self.state.keff = lambda_new
     self.state.phi = phi_new
Example #14
    def getParametersOptVoi(self,i):
	tempN=self.numberTraining+i
	args={}
	args['i']=i
	A=self.stat._k.A(self.dataObj.Xhist[0:tempN,:],noise=self.dataObj.varHist[0:tempN])
        L=np.linalg.cholesky(A)
	args['L']=L
	
	muStart=self.stat._k.mu
	y=self.dataObj.yHist
	temp1=linalg.solve_triangular(L,np.array(y)-muStart,lower=True)
	args['temp1']=temp1
	
	m=self._VOI._points.shape[0]
	temp2=np.zeros((m,tempN))
	
	X=self.dataObj.Xhist
	B=np.zeros((m,tempN))
	for i in xrange(tempN):
	    B[:,i]=self.stat._k.K(self._VOI._points,X[i:i+1,:])[:,0]
	
	a=np.zeros(m)
	for i in xrange(m):
	    temp2[i,:]=linalg.solve_triangular(L,B[i,:].T,lower=True)
	    a[i]=muStart+np.dot(temp2[i,:],temp1)

	args['temp2']=temp2

	args['a']=a
	return args
Example #15
def cholesky_omp(D, x, m, eps=None):
    if eps == None:
        stopping_condition = lambda: it == m  # len(idx) == m
    else:
        stopping_condition = lambda: np.inner(residual, residual) <= eps

    alpha = np.dot(x, D)

    # first step:
    it = 1
    lam = np.abs(np.dot(x, D)).argmax()
    idx = [lam]
    L = np.ones((1, 1))
    gamma = linalg.lstsq(D[:, idx], x)[0]
    residual = x - np.dot(D[:, idx], gamma)

    while not stopping_condition():
        lam = np.abs(np.dot(residual, D)).argmax()
        w = linalg.solve_triangular(L, np.dot(D[:, idx].T, D[:, lam]), lower=True, unit_diagonal=True)
        # should the diagonal be unit in theory? It crashes without it
        L = np.r_[np.c_[L, np.zeros(len(L))], np.atleast_2d(np.append(w, np.sqrt(1 - np.dot(w.T, w))))]
        idx.append(lam)
        it += 1
        # gamma = linalg.solve(np.dot(L, L.T), alpha[idx], sym_pos=True)
        # what am I, stupid??
        Ltc = linalg.solve_triangular(L, alpha[idx], lower=True)
        gamma = linalg.solve_triangular(L, Ltc, trans=1, lower=True)
        residual = x - np.dot(D[:, idx], gamma)

    return gamma, idx
Example #16
def variance(memory, predictor, processes = 1):
    """
    Computes the predictive variance of the latent function.
    Arguments:
        memory(*)   :   Memory object learned from classifier learning
        predictor(*):   Predictor object cached with predictive covariance
    Keyword Arguments:
        processes   :   Number of cores to use for parallelising computations
    Returns:
        fq_var(*)   :   Predictive variance at query points
    (*) Accepts homogeneous lists of the described quantity
    """
    if isinstance(memory, list):
        memories_predictors = [(memory[i], predictor[i]) 
            for i in range(len(memory))]
        return parallel_starmap(variance, memories_predictors, 
            processes = processes)

    kernel = compose(memory.kerneldef)
    if memory.approxmethod == 'laplace':
        v = la.solve_triangular(memory.cache.get('L'), 
            (memory.cache.get('wsqrt') * predictor.Kq.T).T, 
            lower = True, check_finite = False)
    elif memory.approxmethod == 'pls':
        v = la.solve_triangular(memory.cache.get('L'), predictor.Kq,
            lower = True, check_finite = False)
    kqq = kernel(predictor.Xq, None, memory.hyperparams)
    fq_var = kqq - np.sum(v**2, axis = 0)
    return fq_var
Example #17
def chol_up_insert(L, V12, V23, V22, Snn_noise_std_vec,insertionID):

    R = L.T
    N = R.shape[0]
    n = V22.shape[0]
    noise = np.diag(Snn_noise_std_vec ** 2)
    R11 = R[:insertionID, :insertionID]
    R33 = R[insertionID:, insertionID:]
    S11 = R11
    S12 = la.solve_triangular(R11.T, V12, lower=True)
    S13 = R[:insertionID,insertionID:]
    S22 = linalg.jitchol(V22+noise - S12.T.dot(S12)).T
    if V23.shape[1] != 0: # The data is being inserted between columns
        S23 = la.solve_triangular(S22.T,(V23-S12.T.dot(S13)), lower=True)
        S33 = linalg.jitchol(R33.T.dot(R33)-S23.T.dot(S23)).T
    else: #the data is being appended at the end of the matrix
        S23 = np.zeros((n,0))
        S33 = np.zeros((0,0))
    On1 = np.zeros((n, insertionID))
    On2 = np.zeros((N-insertionID, insertionID))
    On3 = np.zeros((N-insertionID,n))

    top = np.concatenate((S11, S12, S13), axis=1)
    middle = np.concatenate((On1, S22, S23), axis=1)
    bottom = np.concatenate((On2, On3, S33), axis=1)
    return np.concatenate((top, middle, bottom), axis=0).T
Example #18
		def negLogEvidence(theta, grad):

			if _optimiseLengthHyperparams:
				self.lengthScale = theta[:-2]
				self.gpLengthScale.setLearningResult(theta[-2:], 0)
			else:
				self.lengthScale = theta.copy()

			self.updateLengthScaleModel()

			self.S = self.kernel(self.X, self.X) + self.sigma**2 * self.I
			self.L = choleskyjitter(self.S)

			alpha = la.solve_triangular(self.L.T, la.solve_triangular(self.L, self.y, lower = True, check_finite = False), check_finite = False)
			negLogEvidenceValue = 0.5 * self.y.dot(alpha) + np.sum(np.log(self.L.diagonal()))
			
			# VERBOSE
			if VERBOSE:
				negLogEvidence.counter += 1

				if negLogEvidence.lastPrintedSec != time.gmtime().tm_sec:
					negLogEvidence.lastPrintedSec = time.gmtime().tm_sec
					if _optimiseLengthHyperparams:
						print(negLogEvidence.counter, '\t', self.lengthScale, self.stationaryLengthScale, self.gpLengthScale.kernelHyperparams, negLogEvidenceValue)
					else:
						print(negLogEvidence.counter, '\t', self.lengthScale, self.stationaryLengthScale, negLogEvidenceValue)

			return(negLogEvidenceValue)
Example #19
 def expec_Lambda_b(self):
     """
     Parameters of the noise in general -- captures the increase in noise over time, and its relationship with y.
     """              
     for f in range(self.F):                         
         # UPDATE Lambda ------------------------       
         shape_Lambda = self.T + 1 + self.shape0_Lambda + self.P
         scale_Lambda = self.scale0_Lambda * self.K_time[0:self.T, :][:, 0:self.T]
         for p in range(self.P):
             pidxs = self.periods==p
             inn_f = self.e[pidxs, f:f+1] # deviations from mean of 0
             inn_fp = inn_f.dot(inn_f.T) + self.cov_e[f][pidxs][:, pidxs]
             scale_Lambda += inn_fp / self.K_target[pidxs][:, pidxs] * self.b[p, f]
         self.Lambda_e[f] = scale_Lambda / (shape_Lambda - self.T - 1)# P x P                            
                         
         # UPDATE b --------------------------- Check against bird paper.            
         shape_b = self.lambda_e[f] + self.T/2.0
         expec_log_b = np.zeros(self.P)
         for p in range(self.P):
             pidxs = self.periods==p
             inn_f = self.e[pidxs, f]
             var_e = np.diag(np.diag(self.cov_e[f][pidxs][:, pidxs]))
             inn_fp = inn_f.dot(inn_f) + var_e            
             
             L_Lambda = cholesky(self.Lambda_e[f] * self.K_target[pidxs][:, pidxs] + 1e-6  * np.eye(self.T), lower=True, check_finite=False)
             B = solve_triangular(L_Lambda, inn_fp, overwrite_b=True, check_finite=False, lower=True)
             A = solve_triangular(L_Lambda.T, B, overwrite_b=True, check_finite=False)
             rate_b = self.lambda_e[f] + np.trace(A)/2.0
             self.b[p, f] = shape_b / rate_b
             expec_log_b[p] = psi(shape_b) - np.log(rate_b)
                    
         # UPDATE lambda -----------------------
         shape_lambda = self.shape0_lambda + 0.5 * self.N
         scale_Lambda = self.scale0_Lambda - 0.5 * np.sum(1 + expec_log_b - self.b[:, f])
         self.lambda_e[f] = shape_lambda / scale_Lambda            
Example #20
    def aN_grad(self,x,L,n,dataObj,gradient=True,onlyGradient=False,logproductExpectations=None):
        """
        Computes a_{n} and, optionally, its derivative. It evaluates only a_{n}
        when gradient and onlyGradient are False; it evaluates a_{n} and computes
        its derivative when gradient is True and onlyGradient is False; and it
        computes only the gradient when onlyGradient is True.
        
        Args:
            x: a_{n} is evaluated at x.
            L: Cholesky decomposition of the matrix A, where A is the covariance
               matrix of the past observations (x,w).
            n: Step of the algorithm.
            dataObj: Data object (it contains all the history).
            gradient: True if we want to compute the gradient; False otherwise.
            onlyGradient: True if we only want to compute the gradient; False otherwise.
            logproductExpectations: Vector with the logarithm of the product of the
                                    expectations of np.exp(-alpha2[j]*((z-W[i,j])**2))
                                    where W[i,:] is a point in the history.
                                    --Only with the SEK--
        """
        n1=self.n1
        n2=self.n2
        muStart=self._k[0].mu
        y2=dataObj.yHist[0:n+self._numberTraining]-muStart
        B=np.zeros(n+self._numberTraining)
        
        if logproductExpectations is None:
            for i in xrange(n+self._numberTraining):
                B[i]=self.B(x,dataObj.Xhist[i,:],self.n1,self.n2,self._k)
        else:
            for i in xrange(n+self._numberTraining):
                B[i]=self.B(x,dataObj.Xhist[i,:],self.n1,self.n2,self._k,logproductExpectations[i])
        
        inv1=linalg.solve_triangular(L,y2,lower=True)

        if onlyGradient:
            gradXB=self.gradXBforAn(x,n,B,self._k,
                                    dataObj.Xhist[0:n+self._numberTraining,0:n1],
                                    n1,self._numberTraining,
                                    dataObj.Xhist[0:n+self._numberTraining,n1:n1+n2],
                                    n2)
            temp4=linalg.solve_triangular(L,gradXB.transpose(),lower=True)
            gradAn=np.dot(inv1.transpose(),temp4)
            
    
            return gradAn

        inv2=linalg.solve_triangular(L,B.transpose(),lower=True)
        aN=muStart+np.dot(inv2.transpose(),inv1)
        if gradient==True:
            gradXB=self.gradXBforAn(x,n,B,self._k,
                                    dataObj.Xhist[0:n+self._numberTraining,0:n1],
                                    n1,self._numberTraining,
                                    dataObj.Xhist[0:n+self._numberTraining,n1:n1+n2],n2)
            temp4=linalg.solve_triangular(L,gradXB.transpose(),lower=True)
            gradAn=np.dot(inv1.transpose(),temp4)
            
            return aN,gradAn
        else:
            return aN
Example #21
 def expec_e(self):
     """
     Noise of each observation
     """
     innovation = self.x - (self.y * self.a + self.c) # mu0_e == 0
     
     for f in range(self.F):
         inn_f = innovation[:, f][:, np.newaxis]
         
         # UPDATE e -----------------------------
         self.cov_e[f] = np.zeros((self.N, self.N))
         for p in range(self.P):
             pidxs = self.periods==p
             inn_fp = inn_f[pidxs]
                              
             K = self.K_target[pidxs][:, pidxs] * self.Lambda_e[f] / self.b[p, f] + 1e-6 * np.eye(self.T)
             
             a_diag = np.diag(self.a[pidxs, f])
             y_diag = np.diag(self.y[pidxs, 0])
             
             var_c = np.diag(np.diag(self.cov_c[f][pidxs][:, pidxs]))
             var_a = np.diag(np.diag(self.cov_a[f][pidxs][:, pidxs]))
             var_y = np.diag(np.diag(self.cov_y[pidxs][:, pidxs]))
             
             S_e = K + var_c + y_diag.dot(var_a).dot(y_diag.T) + a_diag.dot(var_y).dot(a_diag.T)
             Le = cholesky(S_e, lower=True, overwrite_a=True, check_finite=False)
             B = solve_triangular(Le, inn_fp, lower=True, overwrite_b=True, check_finite=False)
             A = solve_triangular(Le.T, B, overwrite_b=True, check_finite=False)           
             V = solve_triangular(Le, K, check_finite=False, lower=True)   
             
             self.e[pidxs, f] = K.dot(A).reshape(-1)
             self.cov_e[f][np.ix_(pidxs, pidxs)] = K - V.T.dot(V) 
Example #22
 def _posterior_dist(self,A,beta,XX,XY,full_covar=False):
     '''
     Calculates mean and covariance matrix of posterior distribution
     of coefficients.
     '''
     # compute precision matrix for active features
     Sinv = beta * XX
     np.fill_diagonal(Sinv, np.diag(Sinv) + A)
     cholesky = True
     # try cholesky, if it fails go back to pinvh
     try:
         # find posterior mean : R*R.T*mean = beta*X.T*Y
         # solve(R*z = beta*X.T*Y) => find z => solve(R.T*mean = z) => find mean
         R    = np.linalg.cholesky(Sinv)
         Z    = solve_triangular(R,beta*XY, check_finite=False, lower = True)
         Mn   = solve_triangular(R.T,Z, check_finite=False, lower = False)
         
         # invert lower triangular matrix from cholesky decomposition
         Ri   = solve_triangular(R,np.eye(A.shape[0]), check_finite=False, lower=True)
         if full_covar:
             Sn   = np.dot(Ri.T,Ri)
             return Mn,Sn,cholesky
         else:
             return Mn,Ri,cholesky
     except LinAlgError:
         cholesky = False
         Sn   = pinvh(Sinv)
         Mn   = beta*np.dot(Sinv,XY)
         return Mn, Sn, cholesky
Example #23
 def expec_c(self):
     for f in range(self.F):      
         innovation = self.x[:, f:f+1] - (self.y * self.a[:, f:f+1] + self.mu0_c + self.e[:, f:f+1]) # observation minus prior over forecasters' predictions
         
         K = self.s_c[f] * self.K
         y_diag = np.diag(self.y[:, 0])             
         a_diag = np.diag(self.a[:, f])
         
         var_a = np.diag(np.diag(self.cov_a[f])) 
         var_y = np.diag(np.diag(self.cov_y))
         var_e = np.diag(np.diag(self.cov_e[f]))
         
         S_c = K + y_diag.dot(var_a).dot(y_diag.T) + a_diag.dot(var_y).dot(a_diag.T) + var_e
         Lc = cholesky(S_c, lower=True, overwrite_a=True, check_finite=False)
         B = solve_triangular(Lc, innovation, lower=True, overwrite_b=True, check_finite=False)
         A = solve_triangular(Lc.T, B, overwrite_b=True, check_finite=False)           
         V = solve_triangular(Lc, K, check_finite=False, lower=True)            
         
         self.c[:, f] = self.mu0_c + K.dot(A).reshape(-1)
         self.cov_c[f] = K - V.T.dot(V) # WHY DO SOME DIAGONALS IN THE TRAINING IDXS END UP < 0? RELATED TO LOWER S_C VALUES? -- TRY FIXING COV_Y FIRST. ALSO CHECK Y_DIAG.COV_A.Y_DIAG
         
         rate0_s = self.s0_c
         shape_s = 1 + 0.5 * self.N 
         cf = self.c[:, f][:, np.newaxis] - self.mu0_c
         
         var_c = np.diag(np.diag(self.cov_c[f].T))            
         B = solve_triangular(self.L_K, cf.T + var_c, lower=True, overwrite_b=True)
         A = solve_triangular(self.L_K.T, B, overwrite_b=True)       
         
         rate_s = rate0_s + 0.5 * np.trace(A)
Example #24
    def _update(self):
        sn2 = self._likelihood.s2
        su2 = sn2 / 1e6

        # kernel wrt the inducing points.
        Kuu = self._kernel.get(self._U)
        p = self._U.shape[0]

        # cholesky for the information gain. note that we only need to compute
        # this once as it is independent from the data.
        self._L = sla.cholesky(Kuu + su2 * np.eye(p))

        # evaluate the kernel and residuals at the new points
        Kux = self._kernel.get(self._U, self._X)
        kxx = self._kernel.dget(self._X)
        r = self._y - self._mean

        # the cholesky of Q.
        V = sla.solve_triangular(self._L, Kux, trans=True)

        # rescale everything by the diagonal matrix ell.
        ell = np.sqrt(kxx + sn2 - np.sum(V**2, axis=0))
        Kux /= ell
        V /= ell
        r /= ell

        # NOTE: to update things incrementally all we need to do is store these
        # components. A just needs to be initialized at the identity and then
        # we just accumulate here.
        self._A = np.eye(p) + np.dot(V, V.T)
        self._a = np.dot(Kux, r)

        # update the posterior.
        self._R = np.dot(sla.cholesky(self._A), self._L)
        self._b = sla.solve_triangular(self._R, self._a, trans=True)
Example #25
    def test__solve_triangular_banded(self, its=100):
        for it in range(its):
            size = random.choice([0, 1, randint(0, 10), randint(0, 100)])
            b = randn(size)
            chol_bm = gen_chol_factor_BandMat(size, transposed=False)
            chol_data = chol_bm.data
            depth = chol_bm.l + chol_bm.u
            lower = (chol_bm.u == 0)
            if size > 0 and rand_bool() and rand_bool():
                badFrame = randint(size)
                chol_data[0 if lower else depth, badFrame] = 0.0
            else:
                badFrame = None
            transposed = rand_bool()
            overwrite_b = rand_bool()
            chol_full = chol_bm.full()

            b_arg = b.copy()
            if badFrame is not None:
                msg = (
                    'singular matrix: resolution failed at diagonal %d' %
                    badFrame
                )
                msgRe = '^' + re.escape(msg) + '$'
                with self.assertRaisesRegexp(la.LinAlgError, msgRe):
                    bla._solve_triangular_banded(
                        chol_data, b_arg, transposed=transposed, lower=lower,
                        overwrite_b=overwrite_b
                    )
                with self.assertRaisesRegexp(la.LinAlgError, msgRe):
                    sla.solve_triangular(
                        chol_full, b, trans=transposed, lower=lower
                    )
            else:
                x = bla._solve_triangular_banded(
                    chol_data, b_arg, transposed=transposed, lower=lower,
                    overwrite_b=overwrite_b
                )
                if transposed:
                    assert_allclose(bm.dot_mv(chol_bm.T, x), b)
                else:
                    assert_allclose(bm.dot_mv(chol_bm, x), b)
                if size == 0:
                    x_good = np.zeros((size,))
                else:
                    x_good = sla.solve_triangular(
                        chol_full, b, trans=transposed, lower=lower
                    )
                assert_allclose(x, x_good)
                assert not np.may_share_memory(x, chol_data)
                if size > 0:
                    self.assertEquals(
                        np.may_share_memory(x, b_arg),
                        overwrite_b
                    )

            if not overwrite_b:
                assert np.all(b_arg == b)
Example #26
def symmetric_solve_simple(P, L, tridiagonal, cell_sizes, free_values, alpha=(1. + sqrt(17)) / 8):
    dtype=tridiagonal.dtype
    z = linalg.solve_triangular(L, P.T.dot(free_values), lower=True, unit_diagonal=True)
    #print tridiagonal
    w = linalg.solve_banded((1, 1), tridiagonal, z)
    tri_inv = tridiagonal_inversion(tridiagonal, cell_sizes, dtype=dtype)
    w1 = tridiagonal_dot(tri_inv, z, dtype=dtype)
    y = linalg.solve_triangular(np.matrix(L, dtype=dtype).getH(), w, lower=False, unit_diagonal=True)
    return P.dot(y)
Example #27
    def predict(self, pred, full_cov=False, compute_grad=False):
        inputs = self.inputs
        values = self.values

        # Special case if there is no data yet (everything from the prior)
        if inputs is None:
            return self.predict_from_prior(pred, full_cov, compute_grad)

        if pred.shape[1] != self.num_dims:
            raise Exception("Dimensionality of inputs must match dimensionality given at init time.")

        # The primary covariances for prediction.
        cand_cross = self.noiseless_kernel.cross_cov(inputs, pred)
        
        chol, alpha = self._pull_from_cache_or_compute()

        # Solve the linear systems.
        # Note: if X = LL^T, cho_solve performs X\b whereas solve_triangular performs L\b
        beta = spla.solve_triangular(chol, cand_cross, lower=True)

        # Predict the marginal means at candidates.
        func_m = np.dot(cand_cross.T, alpha) + self.mean.value

        if full_cov:
            # Return the covariance matrix of the pred inputs, 
            # rather than just the individual variances at each input
            cand_cov = self.noiseless_kernel.cov(pred)
            func_v = cand_cov - np.dot(beta.T, beta)
        else:
            cand_cov = self.noiseless_kernel.diag_cov(pred)
            func_v = cand_cov - np.sum(beta**2, axis=0)

        if not compute_grad:
            return func_m, func_v

        grad_cross = self.noiseless_kernel.cross_cov_grad_data(inputs, pred)
        grad_xp_m  = np.tensordot(np.transpose(grad_cross, (1,2,0)), alpha, 1)

        # this should be faster than (and equivalent to) spla.cho_solve((chol, True),cand_cross))
        gamma = spla.solve_triangular(chol.T, beta, lower=False)

        # Using sum and multiplication and summing instead of matrix multiplication
        # because I only want the diagonals of the gradient of the covariance matrix, not the whole thing
        grad_xp_v = -2.0*np.sum(gamma[:,:,np.newaxis] * grad_cross, axis=0)

        # Not very important -- just to make sure grad_xp_v.shape = grad_xp_m.shape
        if values.ndim > 1:
            grad_xp_v = grad_xp_v[:,:,np.newaxis]
        
        # In case this is a function over a 1D input,
        # return a numpy array rather than a float
        if np.ndim(grad_xp_m) == 0:
            grad_xp_m = np.array([grad_xp_m])
            grad_xp_v = np.array([grad_xp_v])

        return func_m, func_v, grad_xp_m, grad_xp_v
Example #28
 def precond(y):
     """
     Find x = inverse(Rt*R) * y, i.e. solve Rt * (R * x) = y, with z = R * x:
     - solve Rt * z = y for z
     - solve R  * x = z for x
     i.e. two triangular back-substitutions
     """
     z = solve_triangular(R.T, y, lower=True)
     x = solve_triangular(R, z, lower=False)
     return as2d(x)
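The two back-substitutions in precond above, shown in isolation (a sketch assuming A = R.T @ R, where R is the upper Cholesky factor that scipy.linalg.cholesky returns by default):

import numpy as np
from scipy.linalg import cholesky, solve_triangular

rng = np.random.default_rng(5)
M = rng.standard_normal((4, 4))
A = M @ M.T + 4 * np.eye(4)
R = cholesky(A)                                  # upper triangular: A = R.T @ R

y = rng.standard_normal(4)
z = solve_triangular(R.T, y, lower=True)         # Rt * z = y
x = solve_triangular(R, z, lower=False)          # R  * x = z
assert np.allclose(A @ x, y)                     # x == inverse(A) @ y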
Example #29
    def predict(self, Z_new, full_output=True, full_cov=False):
        """
        Predict the multinomial probability vector at a grid of points, Z
        :param Z_new:
        :return:
        """
        assert len(self.data_list) == 1, "Must have one data list in order to predict."
        data = self.data_list[0]
        M = data["M"]
        Z = data["Z"]

        assert Z_new is not None and Z_new.ndim == 2 and Z_new.shape[1] == self.D
        M_new = Z_new.shape[0]

        # Compute the kernel for Z_news
        C   = self.kernel.K(Z, Z)
        Cvv = C + np.diag(1e-6 * np.ones(M))
        Lvv = np.linalg.cholesky(Cvv)

        Cnn = self.kernel.K(Z_new, Z_new)

        # Compute the kernel between the new and valid points
        Cnv = self.kernel.K(Z_new, Z)

        # Predict the psis
        mu_psis_new = np.zeros((self.K, M_new))
        Sig_psis_new = np.zeros((self.K, M_new, M_new))
        for k in xrange(self.K):
            sys.stdout.write(".")
            sys.stdout.flush()

            psik = data["psi"][:,k]

            # Compute the predictive parameters
            y = solve_triangular(Lvv, psik, lower=True)
            x = solve_triangular(Lvv.T, y, lower=False)
            psik_pred = Cnv.dot(x)

            # Save these into the combined arrays
            mu_psis_new[k] = psik_pred + self.mu[k]

            if full_cov:
                # Sig_pred = Cnn - Cnv.dot(np.linalg.solve(Cvv, Cnv.T))
                Sig_psis_new[k] = Cnn - Cnv.dot(dpotrs(Lvv, Cnv.T, lower=True)[0])

        sys.stdout.write("\n")
        sys.stdout.flush()

        # Convert these to pis
        pis_new = np.array([ln_psi_to_pi(psi) for psi in mu_psis_new])

        if full_output:
            return pis_new, mu_psis_new, Sig_psis_new
        else:
            return pis_new
Example #30
    def KGAlg(self,m,nRepeat=1,Train=True,plots=False,**kwargs):
	if self.miscObj.create:
	    fl.createNewFilesFunc(self.path,self.miscObj.rs)
	fl.writeTraining(self)
        if Train is True:
	    self.trainModel(numStarts=nRepeat,**kwargs)
        for i in range(m):
            print i
	    
	    if plots is True:
		tempN=i+self.numberTraining
		At=self.stat._k.A(self.dataObj.Xhist[0:tempN,:],noise=self.dataObj.varHist[0:tempN])
		Lt=np.linalg.cholesky(At)
		
		muStartt=self.stat._k.mu
		yt=self.dataObj.yHist
		temp1t=linalg.solve_triangular(Lt,np.array(yt)-muStartt,lower=True)
		m2=self._VOI._points.shape[0]
		

		self.stat.plotmuN(i,Lt,temp1t,self._VOI._points,m2,
				  self.path,self.dataObj,self.stat._k,
				  self.dataObj.Xhist)
		

		temp2=np.zeros((m2,tempN))
		
		X=self.dataObj.Xhist
		B2=np.zeros((m2,tempN))
		for j in xrange(tempN):
		    B2[:,j]=self.stat._k.K(self._VOI._points,X[j:j+1,:])[:,0]
		
		a2=np.zeros(m2)
		for j in xrange(m2):
		    temp2[j,:]=linalg.solve_triangular(Lt,B2[j,:].T,lower=True)
		    a2[j]=muStartt+np.dot(temp2[j,:],temp1t)

		self._VOI.plotVOI(i,self._VOI._points,Lt,self.dataObj,self.stat._k,
				  temp1t,temp2,a2,m2,self.path)
		
	    if self.miscObj.parallel:
		self.optVOIParal(i,self.opt.numberParallel)
	    else:
		 self.optVOInoParal(i)

            print i
	    if self.miscObj.parallel:
		self.optAnParal(i,self.opt.numberParallel)
	    else:
		self.optAnnoParal(i)
            print i
	if self.miscObj.parallel:
	    self.optAnParal(i,self.opt.numberParallel)
	else:
	    self.optAnnoParal(i)
Example #31
def _mahalanobis_distances(m, L, X):
    cX = X - m[np.newaxis, :]
    tmp = solve_triangular(L, cX.T, lower=True).T
    tmp **= 2
    # return np.sqrt(tmp.sum(axis=1))
    return tmp.sum(axis=1)
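_mahalanobis_distances above returns the squared Mahalanobis distance (the square root is commented out). A sketch checking it against the direct definition (x - m)^T S^{-1} (x - m), assuming the function and its solve_triangular import are in scope:

import numpy as np
from scipy.linalg import cholesky

rng = np.random.default_rng(6)
X = rng.standard_normal((10, 3))
m = X.mean(axis=0)
S = np.cov(X, rowvar=False)
L = cholesky(S, lower=True)                      # S = L @ L.T

d2 = _mahalanobis_distances(m, L, X)
cX = X - m
direct = np.einsum('ij,ij->i', cX, np.linalg.solve(S, cX.T).T)
assert np.allclose(d2, direct)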
Example #32
 def func_wrapped(params):
     return solve_triangular(transform,
                             func(xdata, *params) - ydata,
                             lower=True)
Example #33
 def jac_wrapped(params):
     return solve_triangular(transform,
                             np.asarray(jac(xdata, *params)),
                             lower=True)
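func_wrapped and jac_wrapped above whiten the residuals and the Jacobian with a lower triangular Cholesky factor of the error covariance (this is how scipy.optimize.curve_fit handles a 2-D sigma). The whitening step in isolation, with made-up data:

import numpy as np
from scipy.linalg import cholesky, solve_triangular

rng = np.random.default_rng(7)
r = rng.standard_normal(5)                       # raw residuals: func(xdata, *params) - ydata
M = rng.standard_normal((5, 5))
sigma = M @ M.T + 5 * np.eye(5)                  # covariance of the observation errors
transform = cholesky(sigma, lower=True)

r_white = solve_triangular(transform, r, lower=True)
# whitened residuals reproduce the generalised least-squares norm r^T sigma^{-1} r
assert np.allclose(r_white @ r_white, r @ np.linalg.solve(sigma, r))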
Example #34
    def grad_optimize_ei(self, cand, comp, pend, vals, compute_grad=True):
        if pend.shape[0] == 0:
            best = np.min(vals)
            cand = np.reshape(cand, (-1, comp.shape[1]))

            # The primary covariances for prediction.
            comp_cov = self.cov(comp)
            cand_cross = self.cov(comp, cand)

            # Compute the required Cholesky.
            obsv_cov = comp_cov + self.noise * np.eye(comp.shape[0])
            obsv_chol = spla.cholesky(obsv_cov, lower=True)

            cov_grad_func = getattr(gp, 'grad_' + self.cov_func.__name__)
            cand_cross_grad = cov_grad_func(self.ls, comp, cand)

            # Predictive things.
            # Solve the linear systems.
            alpha = spla.cho_solve((obsv_chol, True), vals - self.mean)
            beta = spla.solve_triangular(obsv_chol, cand_cross, lower=True)

            # Predict the marginal means and variances at candidates.
            func_m = np.dot(cand_cross.T, alpha) + self.mean
            func_v = self.amp2 * (1 + 1e-6) - np.sum(beta**2, axis=0)

            # Expected improvement
            func_s = np.sqrt(func_v)
            u = (best - func_m) / func_s
            ncdf = sps.norm.cdf(u)
            npdf = sps.norm.pdf(u)
            ei = func_s * (u * ncdf + npdf)

            if not compute_grad:
                return ei

            # Gradients of ei w.r.t. mean and variance
            g_ei_m = -ncdf
            g_ei_s2 = 0.5 * npdf / func_s

            # Apply covariance function
            grad_cross = np.squeeze(cand_cross_grad)

            grad_xp_m = np.dot(alpha.transpose(), grad_cross)
            grad_xp_v = np.dot(
                -2 * spla.cho_solve((obsv_chol, True), cand_cross).transpose(),
                grad_cross)

            grad_xp = 0.5 * self.amp2 * (grad_xp_m * g_ei_m +
                                         grad_xp_v * g_ei_s2)
            ei = -np.sum(ei)

            return ei, grad_xp.flatten()

        else:
            # If there are pending experiments, fantasize their outcomes.
            cand = np.reshape(cand, (-1, comp.shape[1]))

            # Create a composite vector of complete and pending.
            comp_pend = np.concatenate((comp, pend))

            # Compute the covariance and Cholesky decomposition.
            comp_pend_cov = (self.cov(comp_pend) +
                             self.noise * np.eye(comp_pend.shape[0]))
            comp_pend_chol = spla.cholesky(comp_pend_cov, lower=True)

            # Compute submatrices.
            pend_cross = self.cov(comp, pend)
            pend_kappa = self.cov(pend)

            # Use the sub-Cholesky.
            obsv_chol = comp_pend_chol[:comp.shape[0], :comp.shape[0]]

            # Solve the linear systems.
            alpha = spla.cho_solve((obsv_chol, True), vals - self.mean)
            beta = spla.cho_solve((obsv_chol, True), pend_cross)

            # Finding predictive means and variances.
            pend_m = np.dot(pend_cross.T, alpha) + self.mean
            pend_K = pend_kappa - np.dot(pend_cross.T, beta)

            # Take the Cholesky of the predictive covariance.
            pend_chol = spla.cholesky(pend_K, lower=True)

            # Make predictions.
            npr.set_state(self.randomstate)
            pend_fant = np.dot(pend_chol,
                               npr.randn(pend.shape[0],
                                         self.pending_samples)) + self.mean

            # Include the fantasies.
            fant_vals = np.concatenate(
                (np.tile(vals[:, np.newaxis],
                         (1, self.pending_samples)), pend_fant))

            # Compute bests over the fantasies.
            bests = np.min(fant_vals, axis=0)

            # Now generalize from these fantasies.
            cand_cross = self.cov(comp_pend, cand)
            cov_grad_func = getattr(gp, 'grad_' + self.cov_func.__name__)
            cand_cross_grad = cov_grad_func(self.ls, comp_pend, cand)

            # Solve the linear systems.
            alpha = spla.cho_solve((comp_pend_chol, True),
                                   fant_vals - self.mean)
            beta = spla.solve_triangular(comp_pend_chol,
                                         cand_cross,
                                         lower=True)

            # Predict the marginal means and variances at candidates.
            func_m = np.dot(cand_cross.T, alpha) + self.mean
            func_v = self.amp2 * (1 + 1e-6) - np.sum(beta**2, axis=0)

            # Expected improvement
            func_s = np.sqrt(func_v[:, np.newaxis])
            u = (bests[np.newaxis, :] - func_m) / func_s
            ncdf = sps.norm.cdf(u)
            npdf = sps.norm.pdf(u)
            ei = func_s * (u * ncdf + npdf)

            # Gradients of ei w.r.t. mean and variance
            g_ei_m = -ncdf
            g_ei_s2 = 0.5 * npdf / func_s

            # Apply covariance function
            grad_cross = np.squeeze(cand_cross_grad)

            grad_xp_m = np.dot(alpha.transpose(), grad_cross)
            grad_xp_v = np.dot(
                -2 * spla.cho_solve(
                    (comp_pend_chol, True), cand_cross).transpose(),
                grad_cross)

            grad_xp = 0.5 * self.amp2 * (
                grad_xp_m * np.tile(g_ei_m, (comp.shape[1], 1)).T +
                (grad_xp_v.T * g_ei_s2).T)
            ei = -np.mean(ei, axis=1)
            grad_xp = np.mean(grad_xp, axis=0)

            return ei, grad_xp.flatten()
Example #35
    def compute_ei(self, comp, pend, cand, vals):
        if pend.shape[0] == 0:
            # If there are no pending, don't do anything fancy.

            # Current best.
            best = np.min(vals)

            # The primary covariances for prediction.
            comp_cov = self.cov(comp)
            cand_cross = self.cov(comp, cand)

            # Compute the required Cholesky.
            obsv_cov = comp_cov + self.noise * np.eye(comp.shape[0])
            obsv_chol = spla.cholesky(obsv_cov, lower=True)

            # Solve the linear systems.
            alpha = spla.cho_solve((obsv_chol, True), vals - self.mean)
            beta = spla.solve_triangular(obsv_chol, cand_cross, lower=True)

            # Predict the marginal means and variances at candidates.
            func_m = np.dot(cand_cross.T, alpha) + self.mean
            func_v = self.amp2 * (1 + 1e-6) - np.sum(beta**2, axis=0)

            # Expected improvement
            func_s = np.sqrt(func_v)
            u = (best - func_m) / func_s
            ncdf = sps.norm.cdf(u)
            npdf = sps.norm.pdf(u)
            ei = func_s * (u * ncdf + npdf)

            return ei
        else:
            # If there are pending experiments, fantasize their outcomes.

            # Create a composite vector of complete and pending.
            comp_pend = np.concatenate((comp, pend))

            # Compute the covariance and Cholesky decomposition.
            comp_pend_cov = (self.cov(comp_pend) +
                             self.noise * np.eye(comp_pend.shape[0]))
            comp_pend_chol = spla.cholesky(comp_pend_cov, lower=True)

            # Compute submatrices.
            pend_cross = self.cov(comp, pend)
            pend_kappa = self.cov(pend)

            # Use the sub-Cholesky.
            obsv_chol = comp_pend_chol[:comp.shape[0], :comp.shape[0]]

            # Solve the linear systems.
            alpha = spla.cho_solve((obsv_chol, True), vals - self.mean)
            beta = spla.cho_solve((obsv_chol, True), pend_cross)

            # Finding predictive means and variances.
            pend_m = np.dot(pend_cross.T, alpha) + self.mean
            pend_K = pend_kappa - np.dot(pend_cross.T, beta)

            # Take the Cholesky of the predictive covariance.
            pend_chol = spla.cholesky(pend_K, lower=True)

            # Make predictions.
            npr.set_state(self.randomstate)
            pend_fant = np.dot(pend_chol,
                               npr.randn(pend.shape[0],
                                         self.pending_samples)) + self.mean

            # Include the fantasies.
            fant_vals = np.concatenate(
                (np.tile(vals[:, np.newaxis],
                         (1, self.pending_samples)), pend_fant))

            # Compute bests over the fantasies.
            bests = np.min(fant_vals, axis=0)

            # Now generalize from these fantasies.
            cand_cross = self.cov(comp_pend, cand)

            # Solve the linear systems.
            alpha = spla.cho_solve((comp_pend_chol, True),
                                   fant_vals - self.mean)
            beta = spla.solve_triangular(comp_pend_chol,
                                         cand_cross,
                                         lower=True)

            # Predict the marginal means and variances at candidates.
            func_m = np.dot(cand_cross.T, alpha) + self.mean
            func_v = self.amp2 * (1 + 1e-6) - np.sum(beta**2, axis=0)

            # Expected improvement
            func_s = np.sqrt(func_v[:, np.newaxis])
            u = (bests[np.newaxis, :] - func_m) / func_s
            ncdf = sps.norm.cdf(u)
            npdf = sps.norm.pdf(u)
            ei = func_s * (u * ncdf + npdf)

            return np.mean(ei, axis=1)
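The closed-form expected improvement used above is EI = s * (u * Phi(u) + phi(u)) with u = (best - mu) / s. Evaluated on hypothetical scalar values:

import scipy.stats as sps

best, func_m, func_s = 0.0, 0.2, 0.5             # hypothetical incumbent, predictive mean and std
u = (best - func_m) / func_s
ei = func_s * (u * sps.norm.cdf(u) + sps.norm.pdf(u))
print(ei)                                        # ~0.115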
Example #36
    def predict(self, X, return_std=False, return_cov=False):
        """Predict using the Gaussian process regression model

        We can also predict based on an unfitted model by using the GP prior.
        In addition to the mean of the predictive distribution, its
        standard deviation (return_std=True) or covariance (return_cov=True)
        can also be returned. Note that at most one of the two can be requested.

        Parameters
        ----------
        X : array-like, shape = (n_samples, n_features)
            Query points where the GP is evaluated

        return_std : bool, default: False
            If True, the standard-deviation of the predictive distribution at
            the query points is returned along with the mean.

        return_cov : bool, default: False
            If True, the covariance of the joint predictive distribution at
            the query points is returned along with the mean

        Returns
        -------
        y_mean : array, shape = (n_samples, [n_output_dims])
            Mean of predictive distribution at query points

        y_std : array, shape = (n_samples,), optional
            Standard deviation of predictive distribution at query points.
            Only returned when return_std is True.

        y_cov : array, shape = (n_samples, n_samples), optional
            Covariance of joint predictive distribution at query points.
            Only returned when return_cov is True.
        """
        if return_std and return_cov:
            raise RuntimeError(
                "Not returning standard deviation of predictions when "
                "returning full covariance.")

        X = check_array(X)

        if not hasattr(self, "X_train_"):  # Unfitted;predict based on GP prior
            if self.kernel is None:
                kernel = (C(1.0, constant_value_bounds="fixed") *
                          RBF(1.0, length_scale_bounds="fixed"))
            else:
                kernel = self.kernel
            y_mean = np.zeros(X.shape[0])
            if return_cov:
                y_cov = kernel(X)
                return y_mean, y_cov
            elif return_std:
                y_var = kernel.diag(X)
                return y_mean, np.sqrt(y_var)
            else:
                return y_mean
        else:  # Predict based on GP posterior
            K_trans = self.kernel_(X, self.X_train_)
            y_mean = K_trans.dot(self.alpha_)  # Line 4 (y_mean = f_star)
            y_mean = self._y_train_mean + y_mean  # undo normal.
            if return_cov:
                v = cho_solve((self.L_, True), K_trans.T)  # Line 5
                y_cov = self.kernel_(X) - K_trans.dot(v)  # Line 6
                return y_mean, y_cov
            elif return_std:
                # cache result of K_inv computation
                if self._K_inv is None:
                    # compute inverse K_inv of K based on its Cholesky
                    # decomposition L and its inverse L_inv
                    L_inv = solve_triangular(self.L_.T,
                                             np.eye(self.L_.shape[0]))
                    self._K_inv = L_inv.dot(L_inv.T)

                # Compute variance of predictive distribution
                y_var = self.kernel_.diag(X)
                y_var -= np.einsum("ij,ij->i", np.dot(K_trans, self._K_inv),
                                   K_trans)

                # Check if any of the variances is negative because of
                # numerical issues. If yes: set the variance to 0.
                y_var_negative = y_var < 0
                if np.any(y_var_negative):
                    warnings.warn("Predicted variances smaller than 0. "
                                  "Setting those variances to 0.")
                    y_var[y_var_negative] = 0.0
                return y_mean, np.sqrt(y_var)
            else:
                return y_mean
Example #37
def ResoudreCMTC(a):
    b = np.append(np.full((1, a[0].__len__()), 0), 1)
    a = np.append(a.transpose(), np.full((1, a[0].__len__()), 1), axis=0)
    Q, R = alg.qr(a)
    return spla.solve_triangular(R, Q.T.dot(b), lower=False)
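ResoudreCMTC ("solve CTMC") computes the stationary distribution of a continuous-time Markov chain: it stacks the transposed generator with a row of ones (the normalisation constraint) and solves the resulting consistent least-squares system through a QR factorisation and one triangular solve. A self-contained sketch of the same idea with a made-up 3-state generator, assuming the reduced QR from numpy.linalg:

import numpy as np
import numpy.linalg as alg
import scipy.linalg as spla

# 3-state CTMC generator: rows sum to zero (illustrative data only)
a = np.array([[-3.0, 2.0, 1.0],
              [ 1.0, -4.0, 3.0],
              [ 2.0, 2.0, -4.0]])

b = np.append(np.zeros(a.shape[0]), 1.0)         # [0, ..., 0, 1]
A = np.append(a.T, np.ones((1, a.shape[0])), axis=0)
Q, R = alg.qr(A)                                 # reduced QR: R is square upper triangular
pi = spla.solve_triangular(R, Q.T.dot(b), lower=False)

assert np.allclose(pi @ a, 0.0, atol=1e-10)      # pi is stationary
assert np.isclose(pi.sum(), 1.0)                 # and normalised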
Example #38
    def solve_ricc_lrcf(A,
                        E,
                        B,
                        C,
                        R=None,
                        S=None,
                        trans=False,
                        options=None,
                        default_solver=None):
        """Compute an approximate low-rank solution of a Riccati equation.

        See :func:`pymor.algorithms.riccati.solve_ricc_lrcf` for a
        general description.

        This function uses `pymess.dense_nm_gmpcare` and `pymess.lrnm`.
        For both methods,
        :meth:`~pymor.vectorarrays.interfaces.VectorArrayInterface.to_numpy`
        and
        :meth:`~pymor.vectorarrays.interfaces.VectorSpaceInterface.from_numpy`
        need to be implemented for `A.source`.
        Additionally, since `dense_nm_gmpcare` is a dense solver, it
        expects :func:`~pymor.algorithms.to_matrix.to_matrix` to work
        for A and E.

        If the solver is not specified using the options or
        default_solver arguments, `dense_nm_gmpcare` is used for small
        problems (smaller than defined with
        :func:`~pymor.algorithms.lyapunov.mat_eqn_sparse_min_size`) and
        `lrnm` for large problems.

        Parameters
        ----------
        A
            The non-parametric |Operator| A.
        E
            The non-parametric |Operator| E or `None`.
        B
            The operator B as a |VectorArray| from `A.source`.
        C
            The operator C as a |VectorArray| from `A.source`.
        R
            The operator R as a 2D |NumPy array| or `None`.
        S
            The operator S as a |VectorArray| from `A.source` or `None`.
        trans
            Whether the first |Operator| in the Riccati equation is
            transposed.
        options
            The solver options to use (see
            :func:`ricc_lrcf_solver_options`).
        default_solver
            Default solver to use (pymess_lrnm,
            pymess_dense_nm_gmpcare).
            If `None`, choose the solver depending on the dimension of `A`.

        Returns
        -------
        Z
            Low-rank Cholesky factor of the Riccati equation solution,
            |VectorArray| from `A.source`.
        """

        _solve_ricc_check_args(A, E, B, C, R, S, trans)
        if default_solver is None:
            default_solver = 'pymess_lrnm' if A.source.dim >= mat_eqn_sparse_min_size(
            ) else 'pymess_dense_nm_gmpcare'
        options = _parse_options(options, ricc_lrcf_solver_options(),
                                 default_solver, None, False)

        if options['type'] == 'pymess_dense_nm_gmpcare':
            X = _call_pymess_dense_nm_gmpare(A,
                                             E,
                                             B,
                                             C,
                                             R,
                                             S,
                                             trans=trans,
                                             options=options['opts'],
                                             plus=False)
            Z = _chol(X)
        elif options['type'] == 'pymess_lrnm':
            if S is not None:
                raise NotImplementedError
            if R is not None:
                import scipy.linalg as spla
                Rc = spla.cholesky(R)  # R = Rc^T * Rc
                Rci = spla.solve_triangular(Rc, np.eye(
                    Rc.shape[0]))  # R^{-1} = Rci * Rci^T
                if not trans:
                    C = C.lincomb(Rci.T)  # C <- Rci^T * C = (C^T * Rci)^T
                else:
                    B = B.lincomb(Rci.T)  # B <- B * Rci
            opts = options['opts']
            opts.type = pymess.MESS_OP_NONE if not trans else pymess.MESS_OP_TRANSPOSE
            eqn = RiccatiEquation(opts, A, E, B, C)
            Z, status = pymess.lrnm(eqn, opts)
        else:
            raise ValueError(
                f'Unexpected Riccati equation solver ({options["type"]}).')

        return A.source.from_numpy(Z.T)
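
The R-handling above relies on the identity C^T R^{-1} C = (Rci^T C)^T (Rci^T C), which is why rescaling C (or B) by the inverse Cholesky factor removes R from the equation handed to `pymess.lrnm`. A minimal numerical check of that identity with dense stand-in matrices (illustrative data only, not pymor objects):

import numpy as np
import scipy.linalg as spla

rng = np.random.default_rng(0)
C = rng.standard_normal((3, 5))             # stand-in for a dense "C" block
M = rng.standard_normal((3, 3))
R = M @ M.T + 3 * np.eye(3)                 # symmetric positive definite weight

Rc = spla.cholesky(R)                       # upper factor, R = Rc^T @ Rc
Rci = spla.solve_triangular(Rc, np.eye(3))  # Rci = Rc^{-1}, so R^{-1} = Rci @ Rci.T

lhs = C.T @ np.linalg.solve(R, C)           # C^T R^{-1} C
rhs = (Rci.T @ C).T @ (Rci.T @ C)           # same quantity after C <- Rci^T C
assert np.allclose(lhs, rhs)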
Example #39
    def predict_variances_all_levels(self, X):
        """
        Evaluates the model at a set of points.

        Arguments
        ---------
        x : np.ndarray [n_evals, dim]
            Evaluation point input variable values

        Returns
        -------
        y : np.ndarray
            Evaluation point output variable values
        """
        # Initialization
        # X = atleast_2d(X)
        nlevel = self.nlvl
        sigma2_rhos = []
        n_eval, n_features_X = X.shape
        #        if n_features_X != self.n_features:
        #            raise ValueError("Design must be an array of n_features columns.")
        X = (X - self.X_offset) / self.X_scale

        # Calculate kriging mean and variance at level 0
        mu = np.zeros((n_eval, nlevel))
        f = self._regression_types[self.options["poly"]](X)
        f0 = self._regression_types[self.options["poly"]](X)
        dx = self._differences(X, Y=self.X_norma_all[0])
        d = self._componentwise_distance(dx)

        # Get regression function and correlation
        F = self.F_all[0]
        C = self.optimal_par[0]["C"]

        beta = self.optimal_par[0]["beta"]
        Ft = solve_triangular(C, F, lower=True)
        # yt = solve_triangular(C, self.y_norma_all[0], lower=True)
        r_ = self._correlation_types[self.options["corr"]](
            self.optimal_theta[0], d).reshape(n_eval, self.nt_all[0])
        gamma = self.optimal_par[0]["gamma"]

        # Scaled predictor
        mu[:, 0] = (np.dot(f, beta) + np.dot(r_, gamma)).ravel()

        self.sigma2_rho = nlevel * [None]
        MSE = np.zeros((n_eval, nlevel))
        r_t = solve_triangular(C, r_.T, lower=True)
        G = self.optimal_par[0]["G"]

        u_ = solve_triangular(G.T, f.T - np.dot(Ft.T, r_t), lower=True)
        sigma2 = self.optimal_par[0]["sigma2"] / self.y_std**2
        MSE[:, 0] = sigma2 * (
            # 1 + self.optimal_noise_all[0] - (r_t ** 2).sum(axis=0) + (u_ ** 2).sum(axis=0)
            1 - (r_t**2).sum(axis=0) + (u_**2).sum(axis=0))

        # Calculate recursively kriging variance at level i
        for i in range(1, nlevel):
            F = self.F_all[i]
            C = self.optimal_par[i]["C"]
            g = self._regression_types[self.options["rho_regr"]](X)
            dx = self._differences(X, Y=self.X_norma_all[i])
            d = self._componentwise_distance(dx)
            r_ = self._correlation_types[self.options["corr"]](
                self.optimal_theta[i], d).reshape(n_eval, self.nt_all[i])
            f = np.vstack((g.T * mu[:, i - 1], f0.T))

            Ft = solve_triangular(C, F, lower=True)
            yt = solve_triangular(C, self.y_norma_all[i], lower=True)
            r_t = solve_triangular(C, r_.T, lower=True)
            G = self.optimal_par[i]["G"]
            beta = self.optimal_par[i]["beta"]

            # scaled predictor
            sigma2 = self.optimal_par[i]["sigma2"] / self.y_std**2
            q = self.q_all[i]
            u_ = solve_triangular(G.T, f - np.dot(Ft.T, r_t), lower=True)
            sigma2_rho = np.dot(
                g,
                sigma2 * linalg.inv(np.dot(G.T, G))[:q, :q] +
                np.dot(beta[:q], beta[:q].T),
            )
            sigma2_rho = (sigma2_rho * g).sum(axis=1)
            sigma2_rhos.append(sigma2_rho)

            if self.name in ["MFKPLS", "MFKPLSK"]:
                p = self.p_all[i]
                Q_ = (np.dot((yt - np.dot(Ft, beta)).T,
                             yt - np.dot(Ft, beta)))[0, 0]
                MSE[:, i] = (
                    # sigma2_rho * MSE[:, i - 1]
                    +Q_ / (2 * (self.nt_all[i] - p - q))
                    # * (1 + self.optimal_noise_all[i] - (r_t ** 2).sum(axis=0))
                    * (1 - (r_t**2).sum(axis=0)) + sigma2 *
                    (u_**2).sum(axis=0))
            else:
                MSE[:, i] = sigma2 * (
                    # 1 + self.optimal_noise_all[i] - (r_t ** 2).sum(axis=0) + (u_ ** 2).sum(axis=0)
                    1 - (r_t**2).sum(axis=0) +
                    (u_**2).sum(axis=0))  # + sigma2_rho * MSE[:, i - 1]
            if self.options["propagate_uncertainty"]:
                MSE[:, i] = MSE[:, i] + sigma2_rho * MSE[:, i - 1]

        # scaled predictor
        MSE *= self.y_std**2

        return MSE, sigma2_rhos
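
The per-level variance above is the standard kriging MSE, sigma^2 * (1 - ||r_t||^2 + ||u||^2), with r_t = C^{-1} r and u correcting for the estimated regression coefficients. A self-contained sketch of that computation for a single level, using synthetic 1-D data purely to illustrate the triangular solves:

import numpy as np
from scipy.linalg import cholesky, solve_triangular, qr

rng = np.random.default_rng(1)
xt = np.sort(rng.uniform(size=8))                      # synthetic 1-D training inputs
yt = np.sin(6 * xt)                                    # synthetic training outputs
theta = 25.0                                           # illustrative correlation length

Rm = np.exp(-theta * (xt[:, None] - xt[None, :]) ** 2) + 1e-10 * np.eye(8)
C = cholesky(Rm, lower=True)                           # correlation matrix factor
F = np.ones((8, 1))                                    # constant regression basis
Ft = solve_triangular(C, F, lower=True)
Q, G = qr(Ft, mode='economic')
yt_w = solve_triangular(C, yt, lower=True)
beta = solve_triangular(G, Q.T @ yt_w)                 # GLS regression weights
sigma2 = np.sum((yt_w - Ft @ beta) ** 2) / 8           # process variance estimate

x = 0.3                                                # prediction point
r = np.exp(-theta * (xt - x) ** 2)
r_t = solve_triangular(C, r, lower=True)
u = solve_triangular(G.T, np.ones(1) - Ft.T @ r_t, lower=True)
mse = sigma2 * (1.0 - r_t @ r_t + u @ u)               # kriging MSE at x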
Example #40
    def predict(self, X, eval_MSE=False, batch_size=None):
        """
        This function evaluates the Gaussian Process model at x.

        Parameters
        ----------
        X : array_like
            An array with shape (n_eval, n_features) giving the point(s) at
            which the prediction(s) should be made.

        eval_MSE : boolean, optional
            A boolean specifying whether the Mean Squared Error should be
            evaluated or not.
            Default assumes eval_MSE = False and evaluates only the BLUP (mean
            prediction).

        batch_size : integer, optional
            An integer giving the maximum number of points that can be
            evaluated simultaneously (depending on the available memory).
            Default is None so that all given points are evaluated at the same
            time.

        Returns
        -------
        y : array_like, shape (n_samples, ) or (n_samples, n_targets)
            An array with shape (n_eval, ) if the Gaussian Process was trained
            on an array of shape (n_samples, ) or an array with shape
            (n_eval, n_targets) if the Gaussian Process was trained on an array
            of shape (n_samples, n_targets) with the Best Linear Unbiased
            Prediction at x.

        MSE : array_like, optional (if eval_MSE == True)
            An array with shape (n_eval, ) or (n_eval, n_targets) as with y,
            with the Mean Squared Error at x.
        """
        check_is_fitted(self, "X")

        # Check input shapes
        X = check_array(X)
        n_eval, _ = X.shape
        n_samples, n_features = self.X.shape
        n_samples_y, n_targets = self.y.shape

        # Run input checks
        self._check_params(n_samples)

        if X.shape[1] != n_features:
            raise ValueError(("The number of features in X (X.shape[1] = %d) "
                              "should match the number of features used "
                              "for fit() "
                              "which is %d.") % (X.shape[1], n_features))

        if batch_size is None:
            # No memory management
            # (evaluates all given points in a single batch run)

            # Normalize input
            X = (X - self.X_mean) / self.X_std

            # Initialize output
            y = np.zeros(n_eval)
            if eval_MSE:
                MSE = np.zeros(n_eval)

            # Get pairwise componentwise L1-distances to the input training set
            dx = manhattan_distances(X, Y=self.X, sum_over_features=False)
            # Get regression function and correlation
            f = self.regr(X)
            r = self.corr(self.theta_, dx).reshape(n_eval, n_samples)

            # Scaled predictor
            y_ = np.dot(f, self.beta) + np.dot(r, self.gamma)

            # Predictor
            y = (self.y_mean + self.y_std * y_).reshape(n_eval, n_targets)

            if self.y_ndim_ == 1:
                y = y.ravel()

            # Mean Squared Error
            if eval_MSE:
                C = self.C
                if C is None:
                    # Light storage mode (need to recompute C, F, Ft and G)
                    if self.verbose:
                        print("This GaussianProcess used 'light' storage mode "
                              "at instantiation. Need to recompute "
                              "autocorrelation matrix...")
                    reduced_likelihood_function_value, par = \
                        self.reduced_likelihood_function()
                    self.C = par['C']
                    self.Ft = par['Ft']
                    self.G = par['G']

                rt = linalg.solve_triangular(self.C, r.T, lower=True)

                if self.beta0 is None:
                    # Universal Kriging
                    u = linalg.solve_triangular(self.G.T,
                                                np.dot(self.Ft.T, rt) - f.T,
                                                lower=True)
                else:
                    # Ordinary Kriging
                    u = np.zeros((n_targets, n_eval))

                MSE = np.dot(self.sigma2.reshape(n_targets, 1),
                             (1. - (rt**2.).sum(axis=0) +
                              (u**2.).sum(axis=0))[np.newaxis, :])
                MSE = np.sqrt((MSE**2.).sum(axis=0) / n_targets)

                # Mean Squared Error might be slightly negative depending on
                # machine precision: force to zero!
                MSE[MSE < 0.] = 0.

                if self.y_ndim_ == 1:
                    MSE = MSE.ravel()

                return y, MSE

            else:

                return y

        else:
            # Memory management

            if type(batch_size) is not int or batch_size <= 0:
                raise Exception("batch_size must be a positive integer")

            if eval_MSE:

                y, MSE = np.zeros(n_eval), np.zeros(n_eval)
                for k in range(max(1, int(np.ceil(n_eval / batch_size)))):
                    batch_from = k * batch_size
                    # clamp to n_eval so the final (possibly partial) batch is included
                    batch_to = min((k + 1) * batch_size, n_eval)
                    y[batch_from:batch_to], MSE[batch_from:batch_to] = \
                        self.predict(X[batch_from:batch_to],
                                     eval_MSE=eval_MSE, batch_size=None)

                return y, MSE

            else:

                y = np.zeros(n_eval)
                for k in range(max(1, int(np.ceil(n_eval / batch_size)))):
                    batch_from = k * batch_size
                    batch_to = min((k + 1) * batch_size, n_eval)
                    y[batch_from:batch_to] = \
                        self.predict(X[batch_from:batch_to],
                                     eval_MSE=eval_MSE, batch_size=None)

                return y
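
A hedged usage sketch for the method above, assuming the legacy scikit-learn `GaussianProcess` estimator it belongs to (removed in scikit-learn 0.20) and purely hypothetical hyperparameter values; it shows both the mean/MSE output and the batch_size path:

import numpy as np
# assumes the legacy estimator this method belongs to, e.g. in scikit-learn < 0.20:
# from sklearn.gaussian_process import GaussianProcess

X_train = np.atleast_2d(np.linspace(0.0, 1.0, 20)).T
y_train = np.sin(6 * X_train).ravel()

gp = GaussianProcess(theta0=1e-1, thetaL=1e-3, thetaU=1e1)  # hypothetical hyperparameters
gp.fit(X_train, y_train)

X_new = np.atleast_2d([0.25, 0.5, 0.75]).T
y_pred, mse = gp.predict(X_new, eval_MSE=True)     # BLUP mean and MSE per point
y_batched = gp.predict(X_new, batch_size=2)        # same mean, evaluated in two batches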
Example #41
def _cholesky_omp(X, y, n_nonzero_coefs, tol=None, copy_X=True,
                  return_path=False):
    """Orthogonal Matching Pursuit step using the Cholesky decomposition.

    Parameters
    ----------
    X : ndarray of shape (n_samples, n_features)
        Input dictionary. Columns are assumed to have unit norm.

    y : ndarray of shape (n_samples,)
        Input targets.

    n_nonzero_coefs : int
        Targeted number of non-zero elements.

    tol : float, default=None
        Targeted squared error, if not None overrides n_nonzero_coefs.

    copy_X : bool, default=True
        Whether the design matrix X must be copied by the algorithm. A false
        value is only helpful if X is already Fortran-ordered, otherwise a
        copy is made anyway.

    return_path : bool, default=False
        Whether to return every value of the nonzero coefficients along the
        forward path. Useful for cross-validation.

    Returns
    -------
    gamma : ndarray of shape (n_nonzero_coefs,)
        Non-zero elements of the solution.

    idx : ndarray of shape (n_nonzero_coefs,)
        Indices of the positions of the elements in gamma within the solution
        vector.

    coef : ndarray of shape (n_features, n_nonzero_coefs)
        The first k values of column k correspond to the coefficient value
        for the active features at that step. The lower left triangle contains
        garbage. Only returned if ``return_path=True``.

    n_active : int
        Number of active features at convergence.
    """
    if copy_X:
        X = X.copy('F')
    else:  # even if we are allowed to overwrite, still copy it if bad order
        X = np.asfortranarray(X)

    min_float = np.finfo(X.dtype).eps
    nrm2, swap = linalg.get_blas_funcs(('nrm2', 'swap'), (X,))
    potrs, = get_lapack_funcs(('potrs',), (X,))

    alpha = np.dot(X.T, y)
    residual = y
    gamma = np.empty(0)
    n_active = 0
    indices = np.arange(X.shape[1])  # keeping track of swapping

    max_features = X.shape[1] if tol is not None else n_nonzero_coefs

    L = np.empty((max_features, max_features), dtype=X.dtype)

    if return_path:
        coefs = np.empty_like(L)

    while True:
        lam = np.argmax(np.abs(np.dot(X.T, residual)))
        if lam < n_active or alpha[lam] ** 2 < min_float:
            # atom already selected or inner product too small
            warnings.warn(premature, RuntimeWarning, stacklevel=2)
            break

        if n_active > 0:
            # Updates the Cholesky decomposition of X' X
            L[n_active, :n_active] = np.dot(X[:, :n_active].T, X[:, lam])
            linalg.solve_triangular(L[:n_active, :n_active],
                                    L[n_active, :n_active],
                                    trans=0, lower=1,
                                    overwrite_b=True,
                                    check_finite=False)
            v = nrm2(L[n_active, :n_active]) ** 2
            Lkk = linalg.norm(X[:, lam]) ** 2 - v
            if Lkk <= min_float:  # selected atoms are dependent
                warnings.warn(premature, RuntimeWarning, stacklevel=2)
                break
            L[n_active, n_active] = sqrt(Lkk)
        else:
            L[0, 0] = linalg.norm(X[:, lam])

        X.T[n_active], X.T[lam] = swap(X.T[n_active], X.T[lam])
        alpha[n_active], alpha[lam] = alpha[lam], alpha[n_active]
        indices[n_active], indices[lam] = indices[lam], indices[n_active]
        n_active += 1

        # solves LL'x = X'y as a composition of two triangular systems
        gamma, _ = potrs(L[:n_active, :n_active], alpha[:n_active], lower=True,
                         overwrite_b=False)

        if return_path:
            coefs[:n_active, n_active - 1] = gamma
        residual = y - np.dot(X[:, :n_active], gamma)
        if tol is not None and nrm2(residual) ** 2 <= tol:
            break
        elif n_active == max_features:
            break

    if return_path:
        return gamma, indices[:n_active], coefs[:, :n_active], n_active
    else:
        return gamma, indices[:n_active], n_active
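
The in-loop Cholesky update uses the block identity: if L factors X_A^T X_A over the active set and a new column x_k is added, the new row is w = L^{-1} X_A^T x_k and the new diagonal entry is sqrt(x_k^T x_k - w^T w). A short check of that update against a factorization computed from scratch (synthetic dictionary):

import numpy as np
from scipy.linalg import cholesky, solve_triangular

rng = np.random.default_rng(2)
X = rng.standard_normal((50, 4))
X /= np.linalg.norm(X, axis=0)            # unit-norm columns, as _cholesky_omp assumes

active, new = [0, 2], 3                   # previously selected atoms and the new one
L = cholesky(X[:, active].T @ X[:, active], lower=True)

w = solve_triangular(L, X[:, active].T @ X[:, new], lower=True)
d = np.sqrt(X[:, new] @ X[:, new] - w @ w)
L_new = np.block([[L, np.zeros((2, 1))], [w[None, :], np.array([[d]])]])

L_ref = cholesky(X[:, active + [new]].T @ X[:, active + [new]], lower=True)
assert np.allclose(L_new, L_ref)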
Example #42
    def reduced_likelihood_function(self, theta=None):
        """
        This function determines the BLUP parameters and evaluates the reduced
        likelihood function for the given autocorrelation parameters theta.

        Maximizing this function wrt the autocorrelation parameters theta is
        equivalent to maximizing the likelihood of the assumed joint Gaussian
        distribution of the observations y evaluated onto the design of
        experiments X.

        Parameters
        ----------
        theta : array_like, optional
            An array containing the autocorrelation parameters at which the
            Gaussian Process model parameters should be determined.
            Default uses the built-in autocorrelation parameters
            (ie ``theta = self.theta_``).

        Returns
        -------
        reduced_likelihood_function_value : double
            The value of the reduced likelihood function associated to the
            given autocorrelation parameters theta.

        par : dict
            A dictionary containing the requested Gaussian Process model
            parameters:

                sigma2
                        Gaussian Process variance.
                beta
                        Generalized least-squares regression weights for
                        Universal Kriging or given beta0 for Ordinary
                        Kriging.
                gamma
                        Gaussian Process weights.
                C
                        Cholesky decomposition of the correlation matrix [R].
                Ft
                        Solution of the linear equation system : [R] x Ft = F
                G
                        QR decomposition of the matrix Ft.
        """
        check_is_fitted(self, "X")

        if theta is None:
            # Use built-in autocorrelation parameters
            theta = self.theta_

        # Initialize output
        reduced_likelihood_function_value = -np.inf
        par = {}

        # Retrieve data
        n_samples = self.X.shape[0]
        D = self.D
        ij = self.ij
        F = self.F

        if D is None:
            # Light storage mode (need to recompute D, ij and F)
            D, ij = l1_cross_distances(self.X)
            if (np.min(np.sum(D, axis=1)) == 0.
                    and self.corr != correlation.pure_nugget):
                raise Exception("Multiple X are not allowed")
            F = self.regr(self.X)

        # Set up R
        r = self.corr(theta, D)
        R = np.eye(n_samples) * (1. + self.nugget)
        R[ij[:, 0], ij[:, 1]] = r
        R[ij[:, 1], ij[:, 0]] = r

        # Cholesky decomposition of R
        try:
            C = linalg.cholesky(R, lower=True)
        except linalg.LinAlgError:
            return reduced_likelihood_function_value, par

        # Get generalized least squares solution
        Ft = linalg.solve_triangular(C, F, lower=True)
        # The deprecated ``econ`` keyword was removed from scipy.linalg.qr;
        # use the economy-size decomposition directly.
        Q, G = linalg.qr(Ft, mode='economic')

        sv = linalg.svd(G, compute_uv=False)
        rcondG = sv[-1] / sv[0]
        if rcondG < 1e-10:
            # Check F
            sv = linalg.svd(F, compute_uv=False)
            condF = sv[0] / sv[-1]
            if condF > 1e15:
                raise Exception("F is too ill conditioned. Poor combination "
                                "of regression model and observations.")
            else:
                # Ft is too ill conditioned, get out (try different theta)
                return reduced_likelihood_function_value, par

        Yt = linalg.solve_triangular(C, self.y, lower=True)
        if self.beta0 is None:
            # Universal Kriging
            beta = linalg.solve_triangular(G, np.dot(Q.T, Yt))
        else:
            # Ordinary Kriging
            beta = np.array(self.beta0)

        rho = Yt - np.dot(Ft, beta)
        sigma2 = (rho**2.).sum(axis=0) / n_samples
        # The determinant of R is equal to the squared product of the diagonal
        # elements of its Cholesky decomposition C
        detR = (np.diag(C)**(2. / n_samples)).prod()

        # Compute/Organize output
        reduced_likelihood_function_value = -sigma2.sum() * detR
        par['sigma2'] = sigma2 * self.y_std**2.
        par['beta'] = beta
        par['gamma'] = linalg.solve_triangular(C.T, rho)
        par['C'] = C
        par['Ft'] = Ft
        par['G'] = G

        return reduced_likelihood_function_value, par
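
The likelihood value uses det(R) = prod(diag(C))^2 for the Cholesky factor C; raising each diagonal entry to 2/n before taking the product yields det(R)^{1/n} directly and avoids overflow for large n. A quick check with an illustrative SPD matrix:

import numpy as np
from scipy.linalg import cholesky

rng = np.random.default_rng(3)
A = rng.standard_normal((6, 6))
R = A @ A.T + 6 * np.eye(6)               # symmetric positive definite
C = cholesky(R, lower=True)
n = R.shape[0]

assert np.isclose(np.linalg.det(R), np.prod(np.diag(C)) ** 2)
assert np.isclose(np.linalg.det(R) ** (1.0 / n), (np.diag(C) ** (2.0 / n)).prod())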
Example #43
def _gram_omp(Gram, Xy, n_nonzero_coefs, tol_0=None, tol=None,
              copy_Gram=True, copy_Xy=True, return_path=False):
    """Orthogonal Matching Pursuit step on a precomputed Gram matrix.

    This function uses the Cholesky decomposition method.

    Parameters
    ----------
    Gram : ndarray of shape (n_features, n_features)
        Gram matrix of the input data matrix.

    Xy : ndarray of shape (n_features,)
        Input targets.

    n_nonzero_coefs : int
        Targeted number of non-zero elements.

    tol_0 : float, default=None
        Squared norm of y, required if tol is not None.

    tol : float, default=None
        Targeted squared error, if not None overrides n_nonzero_coefs.

    copy_Gram : bool, default=True
        Whether the gram matrix must be copied by the algorithm. A false
        value is only helpful if it is already Fortran-ordered, otherwise a
        copy is made anyway.

    copy_Xy : bool, default=True
        Whether the covariance vector Xy must be copied by the algorithm.
        If False, it may be overwritten.

    return_path : bool, default=False
        Whether to return every value of the nonzero coefficients along the
        forward path. Useful for cross-validation.

    Returns
    -------
    gamma : ndarray of shape (n_nonzero_coefs,)
        Non-zero elements of the solution.

    idx : ndarray of shape (n_nonzero_coefs,)
        Indices of the positions of the elements in gamma within the solution
        vector.

    coefs : ndarray of shape (n_features, n_nonzero_coefs)
        The first k values of column k correspond to the coefficient value
        for the active features at that step. The lower left triangle contains
        garbage. Only returned if ``return_path=True``.

    n_active : int
        Number of active features at convergence.
    """
    Gram = Gram.copy('F') if copy_Gram else np.asfortranarray(Gram)

    if copy_Xy or not Xy.flags.writeable:
        Xy = Xy.copy()

    min_float = np.finfo(Gram.dtype).eps
    nrm2, swap = linalg.get_blas_funcs(('nrm2', 'swap'), (Gram,))
    potrs, = get_lapack_funcs(('potrs',), (Gram,))

    indices = np.arange(len(Gram))  # keeping track of swapping
    alpha = Xy
    tol_curr = tol_0
    delta = 0
    gamma = np.empty(0)
    n_active = 0

    max_features = len(Gram) if tol is not None else n_nonzero_coefs

    L = np.empty((max_features, max_features), dtype=Gram.dtype)

    L[0, 0] = 1.
    if return_path:
        coefs = np.empty_like(L)

    while True:
        lam = np.argmax(np.abs(alpha))
        if lam < n_active or alpha[lam] ** 2 < min_float:
            # selected same atom twice, or inner product too small
            warnings.warn(premature, RuntimeWarning, stacklevel=3)
            break
        if n_active > 0:
            L[n_active, :n_active] = Gram[lam, :n_active]
            linalg.solve_triangular(L[:n_active, :n_active],
                                    L[n_active, :n_active],
                                    trans=0, lower=1,
                                    overwrite_b=True,
                                    check_finite=False)
            v = nrm2(L[n_active, :n_active]) ** 2
            Lkk = Gram[lam, lam] - v
            if Lkk <= min_float:  # selected atoms are dependent
                warnings.warn(premature, RuntimeWarning, stacklevel=3)
                break
            L[n_active, n_active] = sqrt(Lkk)
        else:
            L[0, 0] = sqrt(Gram[lam, lam])

        Gram[n_active], Gram[lam] = swap(Gram[n_active], Gram[lam])
        Gram.T[n_active], Gram.T[lam] = swap(Gram.T[n_active], Gram.T[lam])
        indices[n_active], indices[lam] = indices[lam], indices[n_active]
        Xy[n_active], Xy[lam] = Xy[lam], Xy[n_active]
        n_active += 1
        # solves LL'x = X'y as a composition of two triangular systems
        gamma, _ = potrs(L[:n_active, :n_active], Xy[:n_active], lower=True,
                         overwrite_b=False)
        if return_path:
            coefs[:n_active, n_active - 1] = gamma
        beta = np.dot(Gram[:, :n_active], gamma)
        alpha = Xy - beta
        if tol is not None:
            tol_curr += delta
            delta = np.inner(gamma, beta[:n_active])
            tol_curr -= delta
            if abs(tol_curr) <= tol:
                break
        elif n_active == max_features:
            break

    if return_path:
        return gamma, indices[:n_active], coefs[:, :n_active], n_active
    else:
        return gamma, indices[:n_active], n_active
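
Both OMP variants solve the normal equations with LAPACK's `potrs`, which takes the lower Cholesky factor L and solves L L^T x = b, i.e. exactly the two triangular solves mentioned in the comment. A minimal equivalence check (illustrative data):

import numpy as np
from scipy.linalg import cholesky, solve_triangular, get_lapack_funcs

rng = np.random.default_rng(4)
M = rng.standard_normal((5, 5))
G = M @ M.T + 5 * np.eye(5)               # SPD "Gram" matrix
b = rng.standard_normal(5)

L = cholesky(G, lower=True)
potrs, = get_lapack_funcs(('potrs',), (G,))
x_lapack, info = potrs(L, b, lower=True)

x_tri = solve_triangular(L.T, solve_triangular(L, b, lower=True), lower=False)
assert info == 0 and np.allclose(x_lapack, x_tri)
assert np.allclose(G @ x_lapack, b)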
Example #44
def _solve_triangular_common(A, b, lower):
    """ Solves Ax=b when A is a triangular matrix. """
    if A.size == 0 and b.shape[0] == 0:
        return np.zeros(b.shape)
    else:
        return solve_triangular(A, b, lower=lower)
Example #45
    def dotv(self, vector):
        return sla.solve_triangular(self.Lt,
                                    sla.solve_triangular(self.L,
                                                         vector,
                                                         lower=True),
                                    lower=False)
Example #46
def sw2dComputeRHS_curved(h, hu, hv, hN, zx, zy, g, H, f, CD, ctx, cub_ctx,
                          gauss_ctx, curvedEls, J, gmapM, gmapP):

    cub_h = np.dot(cub_ctx.V, h)
    cub_hu = np.dot(cub_ctx.V, hu)
    cub_hv = np.dot(cub_ctx.V, hv)
    cub_hN = np.dot(cub_ctx.V, hN)

    cub_zx = np.dot(cub_ctx.V, zx)
    cub_zy = np.dot(cub_ctx.V, zy)

    cub_H = np.dot(cub_ctx.V, H)

    ((F1, F2, F3, F4),
     (G1, G2, G3, G4)) = sw2dComputeFluxes(cub_h, cub_hu, cub_hv, cub_hN, g,
                                           cub_H)

    DrT = np.transpose(cub_ctx.Dr)
    DsT = np.transpose(cub_ctx.Ds)

    tmpr = cub_ctx.W * (cub_ctx.rx * F1 + cub_ctx.ry * G1)
    tmps = cub_ctx.W * (cub_ctx.sx * F1 + cub_ctx.sy * G1)
    ddr = np.dot(DrT, tmpr)
    dds = np.dot(DsT, tmps)
    MMRHS1 = ddr + dds

    tmpr = cub_ctx.W * (cub_ctx.rx * F2 + cub_ctx.ry * G2)
    tmps = cub_ctx.W * (cub_ctx.sx * F2 + cub_ctx.sy * G2)
    ddr = np.dot(DrT, tmpr)
    dds = np.dot(DsT, tmps)
    MMRHS2 = ddr + dds

    tmpr = cub_ctx.W * (cub_ctx.rx * F3 + cub_ctx.ry * G3)
    tmps = cub_ctx.W * (cub_ctx.sx * F3 + cub_ctx.sy * G3)
    ddr = np.dot(DrT, tmpr)
    dds = np.dot(DsT, tmps)
    MMRHS3 = ddr + dds

    tmpr = cub_ctx.W * (cub_ctx.rx * F4 + cub_ctx.ry * G4)
    tmps = cub_ctx.W * (cub_ctx.sx * F4 + cub_ctx.sy * G4)
    ddr = np.dot(DrT, tmpr)
    dds = np.dot(DsT, tmps)
    MMRHS4 = ddr + dds

    nx = gauss_ctx.nx
    ny = gauss_ctx.ny
    mapW = gauss_ctx.BCmap[3]

    nxW = nx.flatten('F')[mapW]
    nyW = ny.flatten('F')[mapW]

    NGauss = gauss_ctx.nx.shape[0]
    numElements = gauss_ctx.nx.shape[1]

    gauss_h = np.dot(gauss_ctx.Interp, h).flatten('F')
    hM = gauss_h[gmapM]
    hP = gauss_h[gmapP]

    gauss_hu = np.dot(gauss_ctx.Interp, hu).flatten('F')
    huM = gauss_hu[gmapM]
    huP = gauss_hu[gmapP]

    gauss_hv = np.dot(gauss_ctx.Interp, hv).flatten('F')
    hvM = gauss_hv[gmapM]
    hvP = gauss_hv[gmapP]

    gauss_hN = np.dot(gauss_ctx.Interp, hN).flatten('F')
    hNM = gauss_hN[gmapM]
    hNP = gauss_hN[gmapP]

    gauss_H = np.dot(gauss_ctx.Interp, H).flatten('F')
    HM = gauss_H[gmapM]
    HP = gauss_H[gmapP]

    uM = huM / hM
    uP = huP / hP

    vM = hvM / hM
    vP = hvP / hP

    huP[mapW] = huM[mapW] - 2 * nxW * (huM[mapW] * nxW + hvM[mapW] * nyW)
    hvP[mapW] = hvM[mapW] - 2 * nyW * (huM[mapW] * nxW + hvM[mapW] * nyW)

    ((F1M, F2M, F3M, F4M), (G1M, G2M, G3M,
                            G4M)) = sw2dComputeFluxes(hM, huM, hvM, hNM, g, HM)
    ((F1P, F2P, F3P, F4P), (G1P, G2P, G3P,
                            G4P)) = sw2dComputeFluxes(hP, huP, hvP, hNP, g, HP)

    spdM = np.sqrt(uM * uM + vM * vM) + np.sqrt(g * hM)
    spdP = np.sqrt(uP * uP + vP * vP) + np.sqrt(g * hP)

    spdMax = np.max(np.array([spdM, spdP]), axis=0)

    numFaces = 3
    Nfp = int(NGauss / numFaces)
    K = ctx.numElements

    lam = np.reshape(spdMax, (Nfp, numFaces * numElements), order='F')
    lamMaxMat = np.outer(np.ones((Nfp, 1), dtype=float), np.max(lam, axis=0))
    spdMax = np.reshape(lamMaxMat, (Nfp * numFaces, K), order='F')

    dh = np.reshape(hM - hP, (Nfp * numFaces, K), order='F')
    dhu = np.reshape(huM - huP, (Nfp * numFaces, K), order='F')
    dhv = np.reshape(hvM - hvP, (Nfp * numFaces, K), order='F')
    dhN = np.reshape(hNM - hNP, (Nfp * numFaces, K), order='F')

    F1M = np.reshape(F1M, (Nfp * numFaces, K), order='F')
    F2M = np.reshape(F2M, (Nfp * numFaces, K), order='F')
    F3M = np.reshape(F3M, (Nfp * numFaces, K), order='F')
    F4M = np.reshape(F4M, (Nfp * numFaces, K), order='F')

    G1M = np.reshape(G1M, (Nfp * numFaces, K), order='F')
    G2M = np.reshape(G2M, (Nfp * numFaces, K), order='F')
    G3M = np.reshape(G3M, (Nfp * numFaces, K), order='F')
    G4M = np.reshape(G4M, (Nfp * numFaces, K), order='F')

    F1P = np.reshape(F1P, (Nfp * numFaces, K), order='F')
    F2P = np.reshape(F2P, (Nfp * numFaces, K), order='F')
    F3P = np.reshape(F3P, (Nfp * numFaces, K), order='F')
    F4P = np.reshape(F4P, (Nfp * numFaces, K), order='F')

    G1P = np.reshape(G1P, (Nfp * numFaces, K), order='F')
    G2P = np.reshape(G2P, (Nfp * numFaces, K), order='F')
    G3P = np.reshape(G3P, (Nfp * numFaces, K), order='F')
    G4P = np.reshape(G4P, (Nfp * numFaces, K), order='F')

    fluxh = 0.5 * ((F1M + F1P) * nx + (G1M + G1P) * ny + spdMax * dh)
    fluxhu = 0.5 * ((F2M + F2P) * nx + (G2M + G2P) * ny + spdMax * dhu)
    fluxhv = 0.5 * ((F3M + F3P) * nx + (G3M + G3P) * ny + spdMax * dhv)
    fluxhN = 0.5 * ((F4M + F4P) * nx + (G4M + G4P) * ny + spdMax * dhN)

    interp = np.transpose(gauss_ctx.Interp)
    MMRHS1 -= np.dot(interp, (gauss_ctx.W * fluxh))  # subtract surface-integral flux contributions
    MMRHS2 -= np.dot(interp, (gauss_ctx.W * fluxhu))
    MMRHS3 -= np.dot(interp, (gauss_ctx.W * fluxhv))
    MMRHS4 -= np.dot(interp, (gauss_ctx.W * fluxhN))

    RHS1 = np.zeros((ctx.numLocalPoints, ctx.numElements))
    RHS2 = np.zeros((ctx.numLocalPoints, ctx.numElements))
    RHS3 = np.zeros((ctx.numLocalPoints, ctx.numElements))
    RHS4 = np.zeros((ctx.numLocalPoints, ctx.numElements))

    curvedElsSet = set(curvedEls)
    straightEls = []
    for k in range(0, numElements):
        if k not in curvedElsSet:
            straightEls.append(k)

    V = ctx.V
    VT = V.T
    mmInvStandard = np.dot(V, VT)
    RHS1 = np.dot(mmInvStandard, MMRHS1 / J)
    RHS2 = np.dot(mmInvStandard, MMRHS2 / J)
    RHS3 = np.dot(mmInvStandard, MMRHS3 / J)
    RHS4 = np.dot(mmInvStandard, MMRHS4 / J)

    for k in curvedElsSet:
        mmChol = cub_ctx.MMChol[:, :, k]
        RHS1[:, k] = solve_triangular(
            mmChol, solve_triangular(mmChol, MMRHS1[:, k], trans='T'))
        RHS2[:, k] = solve_triangular(
            mmChol, solve_triangular(mmChol, MMRHS2[:, k], trans='T'))
        RHS3[:, k] = solve_triangular(
            mmChol, solve_triangular(mmChol, MMRHS3[:, k], trans='T'))
        RHS4[:, k] = solve_triangular(
            mmChol, solve_triangular(mmChol, MMRHS4[:, k], trans='T'))

    # Add source terms
    u = hu / h
    v = hv / h

    norm_u = np.hypot(u, v)
    CD_norm_u = CD * norm_u

    RHS2 += f * hv - CD_norm_u * u
    RHS3 -= f * hu - CD_norm_u * v
    RHS2 -= g * h * zx  # note: should over-integrate these.
    RHS3 -= g * h * zy

    return (RHS1, RHS2, RHS3, RHS4)
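
For the curved elements above, the custom mass matrix is inverted through its Cholesky factor with two triangular solves rather than an explicit inverse; assuming `cub_ctx.MMChol[:, :, k]` stores the upper factor R with M = R^T R, the pattern is equivalent to a direct solve. A small check of that pattern with a stand-in SPD matrix:

import numpy as np
from scipy.linalg import cholesky, solve_triangular

rng = np.random.default_rng(6)
B = rng.standard_normal((6, 6))
M = B @ B.T + 6 * np.eye(6)               # stand-in for a cubature mass matrix
R = cholesky(M)                            # upper factor, M = R^T @ R
b = rng.standard_normal(6)

x = solve_triangular(R, solve_triangular(R, b, trans='T'))  # solves M x = b
assert np.allclose(M @ x, b)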