Code example #1
File: distortion.py Project: evanbiederstedt/pyBAST
    def lnprob_cov(C):

        # Get first term of loglikelihood expression (y * (1/C) * y.T)
        # Do computation using Cholesky decomposition
        try:
            
            U, luflag = cho_factor(C)
            
        except LinAlgError:

            # Matrix is not positive semi-definite, so replace it with the 
            #  positive semi-definite matrix that is nearest in the Frobenius norm

            E, EV = eigh(C)
            E[E<0] = 1e-12
            U, luflag = cho_factor(EV.dot(np.diag(E)).dot(EV.T))
            
        finally:

            x2 = cho_solve((U, luflag), dxy)
            L1 = dxy.dot(x2)

        # Get second term of loglikelihood expression (log det C)
        sign, L2 = slogdet(C)

        # L1 + L2 is -2*loglike up to a constant, so this is the
        #  quantity to be minimised
        thing_to_be_minimised = (L1 + L2)

        return thing_to_be_minimised
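This snippet shows the pattern that recurs throughout the examples below: factor once with cho_factor, then reuse the factor for the quadratic form via cho_solve. A minimal standalone sketch of the same two likelihood terms, assuming a symmetric positive-definite C and a residual vector dxy; for such a C the log-determinant can also be read off the diagonal of the Cholesky factor, avoiding the separate slogdet call:

import numpy as np
from scipy.linalg import cho_factor, cho_solve

def neg_loglike_terms(C, dxy):
    # Factor C = U^T U once, then reuse the factor for both terms
    U, lower = cho_factor(C)
    quad = dxy.dot(cho_solve((U, lower), dxy))   # dxy^T C^{-1} dxy
    logdet = 2.0 * np.sum(np.log(np.diag(U)))    # log det(C) from the factor
    return quad + logdet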
Code example #2
File: utilities.py Project: christoforosc/ndlutil
def jitchol(A,maxtries=5):
	"""
	Arguments
	---------
	A : An almost pd square matrix

	Returns
	-------
	cho_factor(A)

	Notes
	-----
	Adds jitter to A, to enforce positive-definiteness
	"""
	
	try:
		return linalg.cho_factor(A)
	except linalg.LinAlgError:
		diagA = np.diag(A)
		if np.any(diagA<0.):
			raise linalg.LinAlgError("not pd: negative diagonal elements")
		jitter= diagA.mean()*1e-6
		for i in range(1,maxtries+1):
			try:
				return linalg.cho_factor(A+np.eye(A.shape[0])*jitter)
			except linalg.LinAlgError:
				jitter *= 10
				print('Warning: adding jitter of ' + str(jitter))
		raise linalg.LinAlgError("not positive definite, even with jitter.")
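A hypothetical usage sketch for jitchol (the names v and A are illustrative): on a rank-one outer product, plain cho_factor typically fails, while jitchol succeeds once enough jitter has been added to the diagonal:

import numpy as np
from scipy import linalg

v = np.random.randn(5)
A = np.outer(v, v)                   # PSD but rank one: plain cho_factor usually fails
U, lower = jitchol(A)                # succeeds after jitter is added
x = linalg.cho_solve((U, lower), v)  # the result is used exactly like cho_factor output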
Code example #3
 def model_and_cov(self, fit_params):
     model = (self.pmodel*fit_params['A']*self.b**fit_params['n'])[self.windowrange] + self.fgs(fit_params,self.lmax)[self.windowrange] #(self.pmodel*self.A*self.b**self.n)[self.windowrange] +self.fgs([self.Asz,self.Aps,self.Acib],self.freqs)[self.windowrange]
     if self.freqs[0] !=143:
         self.cho_cov = cho_factor(self.patch_sigma+self.planck_sigma+dot(self.windows,dot(self.beam_corr*outer(model,model),self.windows.T)))
     else:
         self.cho_cov = cho_factor(self.patch_sigma+self.planck_sigma)
     return (dot(self.windows,model),self.cho_cov)  
Code example #4
File: clustered_kde.py Project: bfarr/kombine
    def __init__(self, data):
        self._data = np.atleast_2d(data)

        self._mean = np.mean(data, axis=0)
        self._cov = None

        if self.data.shape[0] > 1:
            try:
                self._cov = np.cov(data.T)

                # Try factoring now to see if regularization is needed
                la.cho_factor(self._cov)

            except la.LinAlgError:
                self._cov = oas_cov(data)

        self._set_bandwidth()

        # store transformation variables for drawing random values
        alphas = np.std(data, axis=0)
        ms = 1./alphas
        m_i, m_j = np.meshgrid(ms, ms)
        ms = m_i * m_j
        self._draw_cov = ms * self._kernel_cov
        self._scale_fac = alphas
Code example #5
    def rakeDistortionlessFilters(self, source, interferer, R_n, delay=0.03, epsilon=5e-3):
        '''
        Compute time-domain filters of a beamformer minimizing noise and interference
        while forcing a distortionless response towards the source.
        '''

        H = buildRIRMatrix(self.R, (source, interferer), self.Lg, self.Fs, epsilon=epsilon, unit_damping=True)
        L = H.shape[1] // 2

        # We first assume the samples are uncorrelated
        K_nq = np.dot(H[:,L:], H[:,L:].T) + R_n

        # constraint
        kappa = int(delay*self.Fs)
        A = H[:,:L]
        b = np.zeros((L,1))
        b[kappa,0] = 1

        # filter computation: g = K_nq^{-1} A (A^T K_nq^{-1} A)^{-1} b
        C = la.cho_factor(K_nq, overwrite_a=True, check_finite=False)
        B = la.cho_solve(C, A)
        D = np.dot(A.T, B)
        C = la.cho_factor(D, overwrite_a=True, check_finite=False)
        x = la.cho_solve(C, b)
        g_val = np.dot(B, x)

        # reshape and store
        self.filters = g_val.reshape((self.M, self.Lg))

        # compute and return SNR
        A = np.dot(g_val.T, H[:,:L])
        num = np.dot(A, A.T)
        denom =  np.dot(np.dot(g_val.T, K_nq), g_val)

        return num/denom
Code example #6
File: GMM.py Project: JonasWallin/BayesFlow
	def likelihood_prior(self, mu, Sigma, k, R_S_mu = None, log_det_Q = None, R_S = None, switchprior = False):
			"""
					Computes the prior that is 
					\pi( \mu | \theta[k], \Sigma[k]) \pi(\Sigma| Q[k], \nu[k]) = 
					N(\mu; \theta[k], \Sigma[k]) IW(\Sigma; Q[k], \nu[k]) 

					If switchprior = True, special values of nu and Sigma_mu
					are used if the parameters nu_sw and Sigma_mu_sw are set
					respectively. This enables use of "relaxed" priors
					facilitating label switching. NB! This makes the kernel
					non-symmetric, hence it cannot be used in a stationary state.
			"""

			if switchprior:			
				try:
					nu = self.nu_sw
				except AttributeError:
					nu = self.prior[k]['sigma']['nu']
				try:
					Sigma_mu = self.Sigma_mu_sw
				except AttributeError:
					Sigma_mu = self.prior[k]['mu']['Sigma']
				Q = self.prior[k]['sigma']['Q']*nu/self.prior[k]['sigma']['nu']
			else:
				nu = self.prior[k]['sigma']['nu']
				Sigma_mu = self.prior[k]['mu']['Sigma']
				Q = self.prior[k]['sigma']['Q']

			if np.isnan(mu[0]):
					return 0, None, None, None
			
			if R_S_mu is None:
					R_S_mu = sla.cho_factor(Sigma_mu,check_finite = False)
			log_det_Sigma_mu = 2 * np.sum(np.log(np.diag(R_S_mu[0])))
			
			if log_det_Q is None:
					R_Q = sla.cho_factor(Q,check_finite = False)
					log_det_Q = 2 * np.sum(np.log(np.diag(R_Q[0])))
			
			if R_S is None:
					R_S = sla.cho_factor(Sigma,check_finite = False)
			log_det_Sigma	= 2 * np.sum(np.log(np.diag(R_S[0])))
			
			
			
			mu_theta = mu - self.prior[k]['mu']['theta'].reshape(self.d)
			# N(\mu; \theta[k], \Sigma[k])
			
			lik = - np.dot(mu_theta.T, sla.cho_solve(R_S_mu, mu_theta, check_finite = False))  /2
			lik = lik - 0.5 * (nu + self.d + 1.) * log_det_Sigma
			lik = lik +  (nu * 0.5) * log_det_Q
			lik = lik - 0.5 * log_det_Sigma_mu
			lik = lik - self.ln_gamma_d(0.5 * nu) - 0.5 * np.log(2) * (nu * self.d)
			lik = lik - 0.5 * np.sum(np.diag(sla.cho_solve(R_S, Q)))
			return lik, R_S_mu, log_det_Q, R_S
Code example #7
File: day4funcs.py Project: nanograv/cit-busyweek
def mark2loglikelihood(psr, Aw, Ar, Si):
    """
    Log-likelihood for our pulsar
    
    This likelihood does marginalize over the timing model. Calculate
    covariance matrix in the time-domain with:
    
    ll = -0.5 * res^{T} (C^{-1} - C^{-1} M (M^{T} C^{-1} M)^{-1} M^{T} C^{-1}) res - \
         0.5 * log(det(C)) - 0.5 * log(det(M^{T} C^{-1} M))
         
    In relation to 'mark1loglikelihood', this likelihood has but a simple addition:
    res' = res - M xi
    where M is a (n x m) matrix, with m < n, and xi is a vector of length m. The xi
    are analytically marginalised over, yielding the above equation (up to constants)
    
    :param psr:
        pulsar object, containing the data and stuff

    :param Aw:
        White noise amplitude, model parameter

    :param Ar:
        Red noise amplitude, model parameter

    :param Si:
        Spectral index of red noise, model parameter
    """
    Mmat = psr.Mmat

    Cov = Aw ** 2 * np.eye(len(psr.toas)) + PL_covmat(psr.toas, Ar, alpha=0.5 * (3 - Si), fL=1.0 / (year * 20))

    cfC = sl.cho_factor(Cov)
    Cinv = sl.cho_solve(cfC, np.eye(len(psr.toas)))
    ldetC = 2 * np.sum(np.log(np.diag(cfC[0])))

    MCM = np.dot(Mmat.T, np.dot(Cinv, Mmat))
    cfM = sl.cho_factor(MCM)
    ldetM = 2 * np.sum(np.log(np.diag(cfM[0])))

    wr = np.dot(Cinv, psr.residuals)
    rCr = np.dot(psr.residuals, wr)
    MCr = np.dot(Mmat.T, wr)

    return (
        -0.5 * rCr
        + 0.5 * np.dot(MCr, sl.cho_solve(cfM, MCr))
        - 0.5 * ldetC
        - 0.5 * ldetM
        - 0.5 * len(psr.residuals) * np.log(2 * np.pi)
    )
Code example #8
File: regularized.py Project: all-umass/graphs
def _solve_admm(Y, q, alpha=10, mu=10, max_iter=10000):
  n = Y.shape[0]
  alpha_q = alpha * q
  # solve (YYt + mu*I + mu) Z = (mu*C - lambda + gamma + mu)
  A, lower = cho_factor(Y.dot(Y.T) + mu*(np.eye(n) + 1), overwrite_a=True)
  C = np.zeros(n)
  Z_old = 0  # shape (n,)
  lmbda = np.zeros(n)
  gamma = 0
  # ADMM iteration
  for i in range(max_iter):
    # call the guts of cho_solve directly for speed
    Z, _ = potrs(A, gamma + mu + mu*C - lmbda, lower=lower, overwrite_b=True)

    tmp = mu*Z + lmbda
    C[:] = np.abs(tmp)
    C -= alpha_q
    np.maximum(C, 0, out=C)
    C *= np.sign(tmp)
    C /= mu

    d_ZC = Z - C
    d_1Z = 1 - Z.sum()
    lmbda += mu * d_ZC
    gamma += mu * d_1Z

    if ((abs(d_1Z) / n < 1e-6)
            and (np.abs(d_ZC).mean() < 1e-6)
            and (np.abs(Z - Z_old).mean() < 1e-5)):
      break
    Z_old = Z
  else:
    warnings.warn('ADMM failed to converge after %d iterations.' % max_iter)
  return C
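The snippet assumes potrs has already been pulled out of LAPACK; the original imports are not shown, but a sketch of one way to set that up is:

import numpy as np
from scipy.linalg import get_lapack_funcs

# Fetch the raw LAPACK Cholesky solver matching the matrix dtype
potrs = get_lapack_funcs('potrs', (np.empty(1, dtype=np.float64),))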
Code example #9
File: parallel.py Project: iancze/Starfish
    def evaluate(self):
        '''
        Return the lnprob using the current version of the C_GP matrix, data matrix,
        and other intermediate products.
        '''

        self.lnprob_last = self.lnprob

        X = (self.chebyshevSpectrum.k * self.flux_std * np.eye(self.ndata)).dot(self.eigenspectra.T)

        CC = X.dot(self.C_GP.dot(X.T)) + self.data_mat

        try:
            factor, flag = cho_factor(CC)
        except np.linalg.LinAlgError:
            print("Spectrum:", self.spectrum_id, "Order:", self.order)
            self.CC_debugger(CC)
            raise

        try:
            R = self.fl - self.chebyshevSpectrum.k * self.flux_mean - X.dot(self.mus)

            logdet = np.sum(2 * np.log((np.diag(factor))))
            self.lnprob = -0.5 * (np.dot(R, cho_solve((factor, flag), R)) + logdet)

            self.logger.debug("Evaluating lnprob={}".format(self.lnprob))
            return self.lnprob

        # To give us some debugging information about what went wrong.
        except np.linalg.LinAlgError:
            print("Spectrum:", self.spectrum_id, "Order:", self.order)
            raise
Code example #10
def bench_lobpcg_mikota():
    print()
    print('                 lobpcg benchmark using mikota pairs')
    print('==============================================================')
    print('      shape      | blocksize |    operation   |   time   ')
    print('                                              | (seconds)')
    print('--------------------------------------------------------------')
    fmt = ' %15s |   %3d     |     %6s     | %6.2f '

    m = 10
    for n in 128, 256, 512, 1024, 2048:
        shape = (n, n)
        A, B = _mikota_pair(n)
        desired_evs = np.square(np.arange(1, m+1))

        tt = time.perf_counter()
        X = rand(n, m)
        X = orth(X)
        LorU, lower = cho_factor(A, lower=0, overwrite_a=0)
        M = LinearOperator(shape,
                matvec=partial(_precond, LorU, lower),
                matmat=partial(_precond, LorU, lower))
        eigs, vecs = lobpcg(A, X, B, M, tol=1e-4, maxiter=40)
        eigs = sorted(eigs)
        elapsed = time.perf_counter() - tt
        assert_allclose(eigs, desired_evs)
        print(fmt % (shape, m, 'lobpcg', elapsed))

        tt = time.perf_counter()
        w = eigh(A, B, eigvals_only=True, eigvals=(0, m-1))
        elapsed = time.perf_counter() - tt
        assert_allclose(w, desired_evs)
        print(fmt % (shape, m, 'eigh', elapsed))
Code example #11
File: day4funcs.py Project: nanograv/cit-busyweek
def mark1loglikelihood(psr, Aw, Ar, Si):
    """
    Log-likelihood for our pulsar. This one does not marginalize
    over the timing model, so it cannot be used if the data has been
    'fit'. Use when creating data with 'dofit=False':
    psr = Pulsar(dofit=False)
    
    Calculate covariance matrix in the time-domain with:
    
    ll = -0.5 * res^{T} C^{-1} res - 0.5 * log(det(C))
    
    :param psr:
        pulsar object, containing the data and stuff

    :param Aw:
        White noise amplitude, model parameter

    :param Ar:
        Red noise amplitude, model parameter

    :param Si:
        Spectral index of red noise, model parameter
    """

    # The function that builds the non-diagonal covariance matrix is Cred_sec
    # Cov = Aw**2 * np.eye(len(psr.toas)) + \
    #      Ar**2 * Cred_sec(psr.toas, alpha=0.5*(3-Si))
    Cov = Aw ** 2 * np.eye(len(psr.toas)) + PL_covmat(psr.toas, Ar, alpha=0.5 * (3 - Si), fL=1.0 / (year * 20))

    cfC = sl.cho_factor(Cov)
    ldetC = 2 * np.sum(np.log(np.diag(cfC[0])))
    rCr = np.dot(psr.residuals, sl.cho_solve(cfC, psr.residuals))

    return -0.5 * rCr - 0.5 * ldetC - 0.5 * len(psr.residuals) * np.log(2 * np.pi)
Code example #12
File: utils.py Project: Sandy4321/bayespy
def chol(C):
    if sparse.issparse(C):
        # Sparse Cholesky decomposition (returns a Factor object)
        return cholmod.cholesky(C)
    else:
        # Dense Cholesky decomposition
        return linalg.cho_factor(C)[0]
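Note that the dense branch returns only the factor matrix and drops the lower flag, so the result cannot be passed straight to cho_solve. A hedged sketch of how a caller might use the dense return value (C and b are illustrative, and the module's own imports of scipy.sparse and scipy.linalg are assumed); scipy's default is an upper-triangular factor with C = U^T U, and solve_triangular never reads the unused triangle:

import numpy as np
from scipy import linalg

C = np.cov(np.random.randn(4, 20))            # an illustrative dense spd matrix
b = np.random.randn(4)
U = chol(C)                                   # dense path: upper factor
y = linalg.solve_triangular(U, b, trans='T')  # solve U^T y = b
x = linalg.solve_triangular(U, y)             # solve U x = y, hence C x = b
logdet = 2 * np.sum(np.log(np.diag(U)))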
Code example #13
File: LikelihoodFunctions.py Project: OxES/MyFuncs
def LogLikelihood_invK_mf_times_GP(p,t,kf,kf_args,mf,mf_args,n_hp):
  """
  Similar to LogLikelihood_invK_mf_and_kf, although the data is *divided* by
  the mean function before fitting a GP(1,C)
  """
  
  #create hyperparameter and mean function vectors
  hpar = p[:n_hp]
  mf_par = p[n_hp:]
    
  #residuals from *division* by mean function (and subtract 1)
  r = t / mf(mf_par,mf_args) - 1.
  
  #ensure r is an (n x 1) column vector
  r = np.matrix(np.array(r).flatten()).T

  #create the covariance matrix
  K = CovarianceMatrix(hpar,kf_args,KernelFunction=kf)
  
  #get log det and invert the covariance matrix (these need optimising!)
#   sign,logdetK = np.linalg.slogdet( K )
#   
#   logP = -0.5 * r.T * np.mat(LA.lu_solve(LA.lu_factor(K),r)) - 0.5 * logdetK - (r.size/2.) * np.log(2*np.pi)

  CI = LA.cho_factor(K,overwrite_a=1)
  logdetK = (2*np.log(np.diag(CI[0])).sum()) #compute log determinant
  logP = -0.5 * np.dot(r.T,LA.cho_solve(CI,r)) - 0.5 * logdetK - (r.size/2.) * np.log(2*np.pi)

  return np.float64(logP)
Code example #14
File: HTFSingleEvolving.py Project: jellis18/PAL
def HTFNull(psr, F, proj, SS, A, f, gam, efac, equad):
    """
    Lentati marginalized likelihood function only including efac and equad
    and power law coefficients

    @param psr: Pulsar class
    @param F: Fourier design matrix constructed in PALutils
    @param proj: Projection operator from white noise
    @param SS: Diagonalized white noise matrix
    @param A: Power spectrum Amplitude
    @param gam: Power spectrum index
    @param f: Frequencies at which to parameterize power spectrum (Hz)
    @param efac: constant multiplier on error bar covariance matrix term
    @param equad: Additional white noise added in quadrature to efac

    @return: LogLike: loglikelihood

    """

    diff = np.dot(proj, psr.res)

    # compute total time span of data
    Tspan = psr.toas.max() - psr.toas.min()

    # get power spectrum coefficients
    f1yr = 1 / 3.16e7
    rho = A ** 2 / 12 / np.pi ** 2 * f1yr ** (gam - 3) * f ** (-gam) / Tspan

    # compute d
    d = np.dot(F.T, diff / (efac * SS + equad ** 2))

    # compute Sigma
    N = 1 / (efac * SS + equad ** 2)
    right = (N * F.T).T
    FNF = np.dot(F.T, right)

    arr = np.zeros(2 * len(rho))
    ct = 0
    for ii in range(0, 2 * len(rho), 2):
        arr[ii] = rho[ct]
        arr[ii + 1] = rho[ct]
        ct += 1

    Phi = np.diag(10 ** arr)
    Sigma = FNF + np.diag(1 / arr)

    # cholesky decomp for second term in exponential
    cf = sl.cho_factor(Sigma)
    expval2 = sl.cho_solve(cf, d)
    logdet_Sigma = np.sum(2 * np.log(np.diag(cf[0])))

    dtNdt = np.sum(diff ** 2 / (efac * SS + equad ** 2))

    logdet_Phi = np.sum(np.log(arr))

    logdet_N = np.sum(np.log(efac * SS + equad ** 2))

    logLike = -0.5 * (logdet_N + logdet_Phi + logdet_Sigma) - 0.5 * (dtNdt - np.dot(d, expval2))

    return logLike
Code example #15
File: utilities.py Project: Sura82/colvb
def safe_GP_inv(K,w):
    """
    Arguments
    ---------
    K, a NxN pd matrix
    w, a N-vector

    Returns
    -------
    (K^-1 + diag(w))^-1
    and
    (1/2)*(ln|K^-1 + diag(w)| + ln|K|)
    and
    chol(K + diag(1./w))
    """
    N = w.size
    assert K.shape==(N,N)
    w_sqrt = np.sqrt(w)
    W_inv = np.diag(1./w)
    W_sqrt_inv = np.diag(1./w_sqrt)

    B = np.eye(N) + np.dot(w_sqrt[:,None],w_sqrt[None,:])*K
    cho = linalg.cho_factor(B)
    T = linalg.cho_solve(cho,W_sqrt_inv)
    ret = W_inv - np.dot(W_sqrt_inv,T)
    return ret, np.sum(np.log(np.diag(cho[0]))), (np.dot(cho[0],W_sqrt_inv), cho[1])
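A small numerical sanity check of the first return value against direct inversion (K and w are made up for illustration):

import numpy as np

N = 4
K = np.cov(np.random.randn(N, 5 * N))   # a random positive-definite matrix
w = np.random.rand(N) + 0.1
ret, half_logdet, cho = safe_GP_inv(K, w)
assert np.allclose(ret, np.linalg.inv(np.linalg.inv(K) + np.diag(w)))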
Code example #16
File: readprepset.py Project: derek-adair/piccard
def logLikelihood(model, h_c=5e-14, alpha=-2.0/3.0):
    # Obtain model information
    residuals = model[1]
    alphaab = model[5]
    times_f = model[0]
    gmat = model[4]
    psrobs = model[6]
    psrg = model[8]
    toaerrs = model[2]
    
    # Calculate the GW covariance matrix
    C = (h_c*h_c)*Cgw_sec(model, alpha=alpha, fL=1.0/500, approx_ksum=False)
    
    # Add the error bars
    C += np.diag(toaerrs*toaerrs)

    GCG = blockmul(C, gmat, psrobs, psrg)
    resid = np.dot(gmat.T, residuals)

    try:
        cf = sl.cho_factor(GCG)
        res = -0.5 * np.dot(resid, sl.cho_solve(cf, resid)) - 0.5 * len(resid) * np.log((2*np.pi)) - 0.5 * np.sum(np.log(np.diag(cf[0])**2))
    except np.linalg.LinAlgError:
        print "Problem inverting matrix at A = %s, alpha = %s:" % (A,alpha)

        raise

    return res
Code example #17
def lsfit(locs1, locs2, index):
    '''
    This deermines the transformation to warp the image 
    onto the map
    '''
    #regressand -  the 'map' coordinates, which are dependent upon the 'image' 
    R = np.zeros([index.size*2]) 
    R[0:index.shape[0]] =  locs2[index,1] #map indexes (y)
    R[index.shape[0]:] = locs2[index,0] #map indexes (x)
        
    #design - the 'image' coordinates
    inputD = np.zeros([index.size, 3])
    inputD[:,0] = 1
    inputD[:,1] = locs1[index,1] #image indexes (r)
    inputD[:,2] = locs1[index,0] #image indexes (c)
        
    D = np.zeros([index.size*2, 6])
    D[0:index.size, 0:3] = inputD
    D[index.size:,3:] = inputD 
                    
    #derive the function to transform the image coordinates (X) onto the map (Y), so that Y=f(X)
    DT = D.T 
    DTD = np.dot(DT,D)
    DTR = np.dot(DT, R)
    L,lower = linalg.cho_factor(DTD, lower=True)
    return linalg.cho_solve((L,lower),DTR)  
Code example #18
File: dataterm.py Project: swenger/convexopt
    def backward(self, x, tau):
        # (1 + tau A^T A)^-1(x + tau A^T b)
        # which amounts to
        #   min_y tau * ||A y - b||^2 + ||y - x||^2

        # TODO solve the dual when we have fat matrix

        if hasattr(self.A, 'A') and type(self.A.A) is _np.ndarray:
            # self.A is a dense matrix
            # we can pre-factorize the system using cholesky decomposition
            # and then quickly re-solve the system
            if self._solve_backward is None or self._solve_backward_tau != tau:
                from scipy.linalg import cho_factor, cho_solve

                A = self.A.A
                H = tau * A.T.dot(A) + _np.eye(A.shape[1])
                self._solve_backward = partial(cho_solve, cho_factor(H))
                self._solve_backward_tau = tau

            return self._solve_backward(x + tau * self.A.rmatvec(self.b))

        else:
            from scipy.sparse.linalg import lsqr, LinearOperator

            def matvec(y):
                return y + tau * self.A.rmatvec(self.A.matvec(y))
            x, istop, itn, r1norm, r2norm, anorm, acond, arnorm, xnorm, var = \
                lsqr(LinearOperator((self.A.shape[1], self.A.shape[1]), matvec, matvec),
                     x + tau * self.A.rmatvec(self.b))
            return x
Code example #19
def lnprob(cube, ndim, nparams):
    
    # Log-likelihood ratio for eccentric single-source detection.

    
    x = np.array([cube[ii] for ii in range(nparams)])
    if len(x)==9:
        logmass, logdist, logorbfreq, gwphi, costheta, cosinc, gwpol, gwgamma, l0 = x
    else:
        logmass, logdist, logorbfreq, gwphi, costheta, cosinc, gwpol, gwgamma, l0, e0 = x
        
    mass = 10.0**logmass
    dist = 10.0**logdist
    orbfreq = 10.0**logorbfreq
    gwtheta = np.arccos(costheta)
    gwinc = np.arccos(cosinc)

    if len(x)==9:
        nharm = 3
    else:
        nharm = int(keplersum[keplersum[:,0]>e0][0][1])+1 #np.arange(1,int(keplersum[keplersum[:,0]>e0][0][1]),1)
    
    gwres = []
    sig_sub = []

    if len(x)==9:
        for ii,p in enumerate(psr):
            gwres.append( createResiduals_ecc(p, gwtheta, gwphi, mass, dist, orbfreq, gwinc, gwpol, gwgamma, 0.001, l0, Nharm=nharm, psrTerm=False) )
            sig_sub.append( p.res - gwres[ii] )
    else:
        for ii,p in enumerate(psr):
            gwres.append( createResiduals_ecc(p, gwtheta, gwphi, mass, dist, orbfreq, gwinc, gwpol, gwgamma, e0, l0, Nharm=nharm, psrTerm=False) )
            sig_sub.append( p.res - gwres[ii] )
    
    d = []
    dtNdt = []
    logLike = 0.0
    for ii,p in enumerate(psr):
        errs = p.toaerrs
        d.append( np.dot(p.Te.T, sig_sub[ii]/( errs**2.0 )) )

        # triple product in likelihood function
        dtNdt.append( np.sum(sig_sub[ii]**2.0/( errs**2.0 )) )

        loglike1 = -0.5 * (logdet_N[ii] + dtNdt[ii])
        # cholesky decomp for second term in exponential
        try:
            cf = sl.cho_factor(Sigma[ii])
            expval2 = sl.cho_solve(cf, d[ii])
            logdet_Sigma = np.sum(2*np.log(np.diag(cf[0])))

        except np.linalg.LinAlgError:
            print('Cholesky decomposition failed second time!! Using SVD instead')
            u,s,v = sl.svd(Sigma[ii])
            expval2 = np.dot(v.T, 1/s*np.dot(u.T, d[ii]))
            logdet_Sigma = np.sum(np.log(s))
    
        logLike += -0.5 * (logdet_Phi[ii] + logdet_Sigma) + 0.5 * (np.dot(d[ii], expval2)) + loglike1

    return logLike
Code example #20
File: resampler.py Project: derek-adair/piccard
    def loglik_full(self, l_a, l_rho, Agw, gammagw):
        """
        Given all these parameters, calculate the full likelihood

        @param l_a:     List of Fourier coefficient arrays for all pulsars
        @param l_rho:   List of arrays of log10(PSD) amplitudes for all pulsars
        @param Agw:     log10(GW amplitude)
        @param gammagw: GWB spectral index

        @return:        Log-likelihood
        """
        # Transform the GWB parameters to PSD coefficients (pc)
        pc_gw = self.gwPSD(Agw, gammagw)
        
        rv = 0.0
        for ii, freq in enumerate(self.freqs):
            a_cos = l_a[:,2*ii]         # Cosine modes for f=freq
            a_sin = l_a[:,2*ii+1]       # Sine modes for f=freq
            rho = l_rho[:,ii]           # PSD amp for f=freq

            # Covariance matrix is the same for sine and cosine modes
            cov = np.diag(10**rho) + self.hdmat * pc_gw[ii]
            cf = sl.cho_factor(cov)
            logdet = 2*np.sum(np.log(np.diag(cf[0])))
            
            # Add the log-likelihood for the cosine and the sine modes
            rv += -0.5 * np.dot(a_cos, sl.cho_solve(cf, a_cos)) - \
                   0.5 * np.dot(a_sin, sl.cho_solve(cf, a_sin)) - \
                   2*self.Npsr*np.log(2*np.pi) - logdet

        return rv
Code example #21
File: segdetrender.py Project: Cadair/k2sc
    def predict(self, pv, flux=None, inputs=None, inputs_pred=None, mean_only=True, splits=None):
        flux = flux if flux is not None else self.data.masked_flux
        iptr = inputs if inputs is not None else self.data.masked_inputs
        ippr = inputs_pred if inputs_pred is not None else iptr

        K0 = self.compute_cmat(pv, iptr, iptr, add_wn=False, splits=splits)
        K  = K0 + self._pv[-1]**2 * identity(K0.shape[0])
        if inputs_pred is None:
            Ks  = K0.copy()
            Kss = K.copy()
        else:
            Ks  = self.compute_cmat(pv, ippr, ippr, add_wn=False, splits=splits)
            Kss = self.compute_cmat(pv, ippr, ippr, add_wn=True, splits=splits)

        L = sla.cho_factor(K)
        b = sla.cho_solve(L, flux)
        mu = dot(Ks, b)

        if mean_only:
            return mu
        else:
            b = sla.cho_solve(L, Ks.T)
            cov = Kss - dot(Ks, b)
            err = np.sqrt(diag(cov))
            return mu, err
Code example #22
File: spt_s12.py Project: marius311/cosmoslik-uspype
    def init(self, p):
        
        self.datadir = os.path.join(os.path.dirname(__file__),'spt_lps12_20120828')
            
        #Load spectrum and covariance
        fn = 'Spectrum_spt2500deg2_lps12%s.newdat'%('_nocal' if ('spt_s12','a_calib') in p else '')
        with open(os.path.join(self.datadir,fn)) as f:
            while 'TT' not in f.readline(): pass
            self.spec=array([fromstring(f.readline(),sep=' ')[1] for _ in range(47)])
            self.sigma=array([fromstring(f.readline(),sep=' ') for _ in range(94)])[47:]
            
        #Load windows
        self.windows = [loadtxt(os.path.join(self.datadir,'windows','window_lps12','window_%i'%i))[:,1] for i in range(1,48)]
        
        if ('spt_s12','lmin') in p:
            bmin = sum(1 for _ in takewhile(lambda x: x<p['spt_s12','lmin'], (sum(1 for _ in takewhile(lambda x: abs(x)<.001,w) ) for w in self.windows)))
            self.spec = self.spec[bmin:]
            self.sigma = self.sigma[bmin:,bmin:]
            self.windows = self.windows[bmin:]
        
        self.errorbars = sqrt(diag(self.sigma))
        self.sigma = cho_factor(self.sigma)
        
        self.windowrange = (lambda x: slice(min(x),max(x)+1))(loadtxt(os.path.join(self.datadir,'windows','window_lps12','window_1'))[:,0])
        self.lmax = self.windowrange.stop
        self.ells = array([dot(arange(10000)[self.windowrange],w) for w in self.windows])

        self.freq = {'dust':154, 'radio': 151, 'tsz':153}
        self.fluxcut = 50

        self.calib_prior = p.get(('spt_s12','calib_prior'),True)
Code example #23
File: segdetrender.py Project: Cadair/k2sc
 def negll(self, pv=None, flux=None, inputs=None, splits=None):
     flux = flux if flux is not None else self.data.masked_normalised_flux
     inputs = inputs if inputs is not None else self.data.masked_inputs
     K = self.compute_cmat(pv, inputs, inputs, splits=splits, add_wn=True)
     L = sla.cho_factor(K)
     b = sla.cho_solve(L, flux)
     return log(diag(L[0])).sum() + 0.5 * dot(flux,b)
Code example #24
File: InferGP.old.py Project: OxES/Infer
  def logLikelihood(self,p):
    "Function to calculate the log likeihood"
    
    #calculate the residuals
    r = self.t - self.mf(p[self.n_hp:],self.mf_args)
    
    #ensure r is an (n x 1) column vector
    r = np.matrix(np.array(r).flatten()).T
    
    #calculate covariance matrix, cholesky factor and logdet if hyperparameters change
#     print "pars:", self.pars, type(self.pars)
#     print "p:", p, type(p)
#     print p[:self.n_hp] != self.pars[:self.n_hp]
#     print np.all(p[:self.n_hp] != self.pars[:self.n_hp])
    if self.pars is None or np.all(p[:self.n_hp] != self.pars[:self.n_hp]):
#       print "no :("
      self.K = GPC.CovarianceMatrix(p[:self.n_hp],self.kf_args,KernelFunction=self.kf)
      self.ChoFactor = LA.cho_factor(self.K)#,overwrite_a=1)
      self.logdetK = (2*np.log(np.diag(self.ChoFactor[0])).sum())
#     else: print "yeah!!"
    
    #store the new parameters
    self.pars = np.copy(p)
    
    #calculate the log likelihood
    logP = -0.5 * r.T * np.mat(LA.cho_solve(self.ChoFactor,r)) - 0.5 * self.logdetK - (r.size/2.) * np.log(2*np.pi)
    
    return float(logP)
Code example #25
File: GPClass.py Project: nealegibson/GeePea
    def logLikelihood_cholesky(self, p):
        "Function to calculate the log likeihood"

        # calculate the residuals
        r = self.y - self.mf(p[: self._n_mfp], self.xmf)

        # ensure r is an (n x 1) column vector
        r = np.matrix(np.array(r).flatten()).T

        # check if covariance, chol factor and log det are already calculated and stored
        new_hash = hash(p[-self.n_hp :].tobytes())  # calculate and check the hash
        if np.any(self.hp_hash == new_hash):
            useK = np.where(self.hp_hash == new_hash)[0][0]
        else:  # else calculate and store the new hash, cho_factor and logdetK
            useK = self.si = (self.si + 1) % self.n_store  # increment the store index number
            #      self.choFactor[self.si] = LA.cho_factor(GPC.CovarianceMatrix(p[self._n_mfp:],self.x,KernelFunction=self.kf))
            self.choFactor[self.si] = LA.cho_factor(self.CovMat_p(p))
            self.logdetK[self.si] = 2 * np.log(np.diag(self.choFactor[self.si][0])).sum()
            self.hp_hash[self.si] = new_hash

        # calculate the log likelihood
        logP = (
            -0.5 * r.T * np.mat(LA.cho_solve(self.choFactor[useK], r))
            - 0.5 * self.logdetK[useK]
            - (r.size / 2.0) * np.log(2 * np.pi)
        )

        return float(logP)
Code example #26
File: OMGP.py Project: tomasgomes/GPclust
    def update_kern_grads(self):
        """
        Set the derivative of the lower bound wrt the (kernel) parameters
        """
        for i, kern in enumerate(self.kern):
            K = kern.K(self.X)
            B_inv = np.diag(1. / (self.phi[:, i] / self.variance))

            alpha = linalg.cho_solve(linalg.cho_factor(K + B_inv), self.Y)
            K_B_inv = pdinv(K + B_inv)[0]

            dL_dK = np.outer(alpha, alpha) - K_B_inv

            kern.update_gradients_full(dL_dK=dL_dK, X=self.X)

        # variance gradient

        grad_Lm_variance = 0.0
        for i, kern in enumerate(self.kern):
            K = kern.K(self.X)
            I = np.eye(self.N)

            B_inv = np.diag(1. / ((self.phi[:, i] + 1e-6) / self.variance))
            alpha = np.linalg.solve(K + B_inv, self.Y)
            K_B_inv = pdinv(K + B_inv)[0]
            dL_dB = np.outer(alpha, alpha) - K_B_inv
            grad_B_inv = np.diag(1. / (self.phi[:, i] + 1e-6))

            grad_Lm_variance += 0.5 * np.trace(np.dot(dL_dB, grad_B_inv))

        self.variance.gradient = grad_Lm_variance
Code example #27
File: linalg.py Project: zpace/stellarmass_pca
def spla_chol_invert(K, eye):
    '''
    invert a positive-definite matrix using cholesky decomposition
    '''
    Ltup = spla.cho_factor(K, lower=True)
    K_inv = spla.cho_solve(Ltup, eye, check_finite=False)
    return K_inv
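Typical call, with an identity of matching size as the right-hand side (K here is illustrative):

import numpy as np

K = np.cov(np.random.randn(4, 20))      # any positive-definite matrix
K_inv = spla_chol_invert(K, np.eye(K.shape[0]))
assert np.allclose(K.dot(K_inv), np.eye(K.shape[0]))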
Code example #28
File: signal_base.py Project: jellis18/enterprise
    def _solve_ZNX(self, X, Z):
        """Solves :math:`Z^T N^{-1}X`, where :math:`X`
        and :math:`Z` are 1-d or 2-d arrays.
        """
        if X.ndim == 1:
            X = X.reshape(X.shape[0], 1)
        if Z.ndim == 1:
            Z = Z.reshape(Z.shape[0], 1)

        n, m = Z.shape[1], X.shape[1]
        ZNX = np.zeros((n, m))
        if len(self._idx) > 0:
            ZNXr = np.dot(Z[self._idx,:].T, X[self._idx,:] /
                          self._nvec[self._idx, None])
        else:
            ZNXr = 0
        for slc, block in zip(self._slices, self._blocks):
            Zblock = Z[slc, :]
            Xblock = X[slc, :]

            if slc.stop - slc.start > 1:
                cf = sl.cho_factor(block+np.diag(self._nvec[slc]))
                bx = sl.cho_solve(cf, Xblock)
            else:
                bx = Xblock / self._nvec[slc][:, None]
            ZNX += np.dot(Zblock.T, bx)
        ZNX += ZNXr
        return ZNX.squeeze() if len(ZNX) > 1 else float(ZNX)
Code example #29
File: parallel_linear.py Project: EdGillen/Starfish
    def evaluate(self):
        '''
        Return the lnprob using the current version of the C_GP matrix, data matrix,
        and other intermediate products.
        '''

        self.lnprob_last = self.lnprob

        CC = self.data_mat

        model = self.chebyshevSpectrum.k * self.flux

        try:

            factor, flag = cho_factor(CC)

            R = self.fl - model

            logdet = np.sum(2 * np.log((np.diag(factor))))
            self.lnprob = -0.5 * (np.dot(R, cho_solve((factor, flag), R)) + logdet)

            self.logger.debug("Evaluating lnprob={}".format(self.lnprob))
            return self.lnprob

        # To give us some debugging information about what went wrong.
        except np.linalg.LinAlgError:
            print("Spectrum:", self.spectrum_id, "Order:", self.order)
            raise
Code example #30
    def __init__(self, 
                 X,
                 sigma,
                 offset=None,
                 quadratic=None,
                 initial=None):
        """
        Parameters
        ----------

        X : np.ndarray
            Design matrix.

        sigma : float
            Known standard deviation.

        """

        rr.smooth_atom.__init__(self,
                                (X.shape[1],),
                                offset=offset,
                                quadratic=quadratic,
                                initial=initial)

        self.X = X
        self.sigma = sigma
        self._cholX = cho_factor(X.T.dot(X))
Code example #31
    def integrate_kde(self, other):
        """
        Computes the integral of the product of this kernel density estimate
        with another.

        Parameters
        ----------
        other : gaussian_kde instance
            The other kde.

        Returns
        -------
        value : scalar
            The result of the integral.

        Raises
        ------
        ValueError
            If the KDEs have different dimensionality.

        """
        if other.d != self.d:
            raise ValueError("KDEs are not the same dimensionality")

        # we want to iterate over the smallest number of points
        if other.n < self.n:
            small = other
            large = self
        else:
            small = self
            large = other

        sum_cov = small.covariance + large.covariance
        sum_cov_chol = linalg.cho_factor(sum_cov)
        result = 0.0
        for i in range(small.n):
            mean = small.dataset[:, i, newaxis]
            diff = large.dataset - mean
            tdiff = linalg.cho_solve(sum_cov_chol, diff)

            energies = sum(diff * tdiff, axis=0) / 2.0
            result += sum(exp(-energies), axis=0)

        result /= sqrt(linalg.det(2 * pi * sum_cov)) * large.n * small.n

        return result
Code example #32
    def _m_step_bloc_diagonal(self):

        if self.fit_grey:
            Y = copy.deepcopy(self.y) - (self.h[:,1]*np.ones(np.shape(self.y)).T).T
        else:
            Y = copy.deepcopy(self.y)

        new_slopes = np.zeros(np.shape(self.A[:,self.filter_grey]))

        h_ht_dict = {}
        
        for i in range(self.nbin // self.size_bloc):
            for sn in range(self.nsn):
                h_ht=np.dot(np.matrix(self.h[sn,self.filter_grey]).T,np.matrix(self.h[sn,self.filter_grey]))
                
                if self.sparse:
                    W_sub=sugar.sed_fitting.extract_block_diag(self.wy[sn],self.size_bloc,i)
                else:
                    W_sub = self.wy[sn][i*self.size_bloc:(i+1)*self.size_bloc, i*self.size_bloc:(i+1)*self.size_bloc]
                
                hh_kron_W_sub = linalg.kron(h_ht, W_sub)
                WYh = np.dot(W_sub, np.dot(np.matrix(Y[sn,i*self.size_bloc:(i+1)*self.size_bloc]).T,
                                           np.matrix(self.h[sn,self.filter_grey])))

                if sn == 0:
                    hh_kron_W_sum = np.copy(hh_kron_W_sub)
                    WYh_sum = np.copy(WYh)
                else:
                    hh_kron_W_sum += hh_kron_W_sub
                    WYh_sum += WYh

            h_ht_dict[i] = [hh_kron_W_sum, WYh_sum]
                    
        for wl in h_ht_dict.keys():
            hh_kron_W_sum, W_sum = h_ht_dict[wl]
            sum_WYh_vector = np.zeros(self.size_bloc*self.nslopes)
            for i in range(self.nslopes):
                sum_WYh_vector[i*self.size_bloc:][:self.size_bloc]=W_sum[:,i].ravel()

            X_cho = linalg.cho_factor(hh_kron_W_sum)
            slopes_solve = linalg.cho_solve(X_cho, sum_WYh_vector)
            for i in range(self.nslopes):
                new_slopes[wl*self.size_bloc:(wl+1)*self.size_bloc,i] = slopes_solve[i*self.size_bloc:(i+1)*self.size_bloc]
            
        self.A[:,self.filter_grey]=new_slopes
        return new_slopes
Code example #33
 def compute_desired_accel(self, qpos_err, qvel_err):
     dt = self.model.opt.timestep
     nv = self.model.nv
     M = np.zeros(nv * nv)
     mjf.mj_fullM(self.model, M, self.data.qM)
     M.resize(self.model.nv, self.model.nv)
     C = self.data.qfrc_bias.copy()
     k_p = np.zeros(nv)
     k_d = np.zeros(nv)
     k_p[6:] = self.cfg.jkp
     k_d[6:] = self.cfg.jkd
     K_p = np.diag(k_p)
     K_d = np.diag(k_d)
     q_accel = cho_solve(
         cho_factor(M + K_d * dt), -C[:, None] -
         K_p.dot(qpos_err[:, None]) - K_d.dot(qvel_err[:, None]))
     return q_accel.squeeze()
Code example #34
File: gp_grief_model.py Project: scwolof/gp_grief
    def _cov_setup(self):
        """
        setup the covariance matrix
        """
        if self._P is not None:  # then already computed so return
            return
        # get the weights
        self._w = self.kern.w

        # get the p x p matrix A if necessary
        if self._A is None:  # then compute, note this is expensive
            self._Phi = self.kern.cov(self.X)[0]
            self._A = self._Phi.T.dot(self._Phi)  # O(np^2) operation!

        # compute the P matrix and factorize
        self._P = self._A + np.diag(self.noise_var / self._w)
        self._Pchol = cho_factor(self._P)
Code example #35
    def __init__(self, functional_kernel, Xs, Ys):
        super().__init__(functional_kernel, Ys)

        Xs = np.vstack(Xs)
        dists = ExactLMCLikelihood._gen_dists(functional_kernel.active_dims,
                                              Xs, Xs)

        self.materialized_kernels = self.functional_kernel.eval_kernels(dists)
        self.K = sum(
            self._personalized_coreg_scale(A, Kq)
            for A, Kq in zip(self.functional_kernel.coreg_mats(),
                             self.materialized_kernels))
        self.K += np.diag(np.repeat(functional_kernel.noise, self.lens))
        self.L = la.cho_factor(self.K)
        self.deriv = ExactDeriv(self.L, self.y)
        self.materialized_grads = self.functional_kernel.eval_kernel_gradients(
            dists)
Code example #36
    def log_likelihood(self, p):
        p = self.to_params(p)

        v = self.rvs(p)

        res = self.vs - v - p['mu']

        cov = p['nu'] * p['nu'] * np.diag(self.dvs * self.dvs)
        cov += generate_covariance(self.ts, p['sigma'], p['tau'])

        cfactor = sl.cho_factor(cov)
        cc, lower = cfactor

        n = self.ts.shape[0]

        return -0.5 * n * np.log(2.0 * np.pi) - np.sum(np.log(
            np.diag(cc))) - 0.5 * np.dot(res, sl.cho_solve(cfactor, res))
Code example #37
File: clustered_kde.py Project: kjkellyphys/kombine
    def _set_bandwidth(self):
        """
        Use Scott's rule to set the kernel bandwidth.  Also store Cholesky
        decomposition for later.
        """
        if self._N > 0:
            self._kernel_cov = self._cov * self._N**(-2. / (self._dim + 4))

            # Used to evaluate PDF with cho_solve()
            self._cho_factor = la.cho_factor(self._kernel_cov)

            # Make sure the estimated PDF integrates to 1.0
            self._lognorm = self._dim/2.0 * np.log(2.0*np.pi) + np.log(self._N) +\
                np.sum(np.log(np.diag(self._cho_factor[0])))

        else:
            self._lognorm = -np.inf
Code example #38
def stable_cho_factor(x, tiny=_TINY):
    """
    NAME:
       stable_cho_factor
    PURPOSE:
       Stable version of the cholesky decomposition
    INPUT:
       x - (sc.array) positive definite matrix
       tiny - (double) tiny number to add to the covariance matrix to make the decomposition stable (has a default)
    OUTPUT:
       (L,lowerFlag) - output from scipy.linalg.cho_factor for lower=True
    REVISION HISTORY:
       2009-09-25 - Written - Bovy (NYU)
    """
    return linalg.cho_factor(
        x + numpy.sum(numpy.diag(x)) * tiny * numpy.eye(x.shape[0]),
        lower=True)
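A hedged usage sketch, drawing correlated Gaussian draws from the stabilized lower factor; the numpy.tril mask is needed because scipy's cho_factor leaves arbitrary values in the unused triangle of the returned array:

import numpy
from scipy import linalg

cov = numpy.cov(numpy.random.randn(3, 30))  # illustrative positive-definite covariance
L, lowerFlag = stable_cho_factor(cov)
L = numpy.tril(L)                           # mask the junk upper triangle
draws = L.dot(numpy.random.randn(3, 1000))  # zero-mean draws with covariance ~ cov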
Code example #39
def main():
    batch_size = 1000000
    dim = 256
    X = np.random.normal(size=[batch_size, dim])
    A = np.matmul(X.T, X)

    Xtest = np.random.normal(size=[batch_size, dim])
    Ainv = la.solve(A, np.eye(dim), assume_a='pos')

    cho_factor = la.cho_factor(A)
    cholA = la.cholesky(A)

    #Method 0
    start = time.time()
    predict0 = np.sum(np.multiply(np.matmul(Xtest, Ainv), Xtest), axis=-1)
    time0 = time.time() - start
    '''
    predict0 = np.sum(np.multiply(Xtest, la.solve(A, Xtest.T, sym_pos=True).T), axis=-1)
    predict0_2 = np.sum(np.multiply(Xtest, la.cho_solve((cholA, False), Xtest.T).T), axis=-1)
    '''

    #Method 1
    start = time.time()
    predict0_1 = np.sum(np.multiply(Xtest,
                                    la.cho_solve(cho_factor, Xtest.T).T),
                        axis=-1)
    time1 = time.time() - start

    #Method 2
    start = time.time()
    predict1 = np.sum(np.square(la.solve_triangular(cholA, Xtest.T)), axis=0)
    time2 = time.time() - start
    '''
    for p0, p0_1, p0_2, p1 in zip(predict0, predict0_1, predict0_2, predict1):
        print p0, p0_1, p0_2, p1
    print np.allclose(predict0, predict0_1)
    print np.allclose(predict0, predict0_2)
    print np.allclose(predict0, predict1)
    '''
    '''
    for p0, p1 in zip(predict0, predict1):
        print p0, p1
    '''
    print(time0)
    print(time1)
    print(time2)
Code example #40
File: tools.py Project: rodluger/everest-workshop
def GetChunkData(star, chunk, joint_fit=False):
    '''
    Return the uniform time array, gap indices, flux values and Cholesky
    factor of the covariance matrix for a single light curve chunk.
    '''

    # These are the unmasked indices for the current chunk
    m = star.get_masked_chunk(chunk, pad=False)

    # Get the covariance matrix for this chunk
    K = GetCovariance(star.kernel, star.kernel_params, star.time[m],
                      star.fraw_err[m])

    # Are we marginalizing over the systematics model?
    # If so, include the PLD covariance in K and do the
    # search on the *raw* light curve.
    if joint_fit:
        A = np.zeros((len(m), len(m)))
        for n in range(star.pld_order):
            XM = star.X(n, m)
            A += star.lam[chunk][n] * np.dot(XM, XM.T)
        K += A
        flux = star.fraw[m]
    else:
        flux = star.flux[m]

    # Compute the Cholesky factorization of K
    C = cho_factor(K)

    # Create a uniform time array and get indices of missing cadences
    dt = np.median(np.diff(star.time[m]))
    tol = np.nanmedian(np.diff(star.time[m])) / 5.
    tunif = np.arange(star.time[m][0], star.time[m][-1] + tol, dt)
    time = np.array(tunif)
    gaps = []
    j = 0
    for i, t in enumerate(tunif):
        if np.abs(star.time[m][j] - t) < tol:
            time[i] = star.time[m][j]
            j += 1
            if j == len(star.time[m]):
                break
        else:
            gaps.append(i)
    gaps = np.array(gaps, dtype=int)

    return time, gaps, flux, C
Code example #41
File: bayesquad.py Project: jacobnzw/icinco-code
    def weights_rbf(self, unit_sp, hypers):
        # BQ weights for RBF kernel with given hypers, computations adopted from the GP-ADF code [Deisenroth] with
        # the following assumptions:
        #   (A1) the uncertain input is zero-mean with unit covariance
        #   (A2) one set of hyper-parameters is used for all output dimensions (one GP models all outputs)
        d, n = unit_sp.shape
        # GP kernel hyper-parameters
        alpha, el, jitter = hypers['sig_var'], hypers['lengthscale'], hypers[
            'noise_var']
        assert len(el) == d
        # pre-allocation for convenience
        eye_d, eye_n = np.eye(d), np.eye(n)
        iLam1 = np.atleast_2d(np.diag(el**-1))  # sqrt(Lambda^-1)
        iLam2 = np.atleast_2d(np.diag(el**-2))

        inp = unit_sp.T.dot(
            iLam1
        )  # sigmas / el[:, na] (x - m)^T*sqrt(Lambda^-1) # (numSP, xdim)
        K = np.exp(2 * np.log(alpha) - 0.5 * maha(inp, inp))
        iK = cho_solve(cho_factor(K + jitter * eye_n), eye_n)
        B = iLam2 + eye_d  # (D, D)
        c = alpha**2 / np.sqrt(det(B))
        t = inp.dot(inv(B))  # inn*(P + Lambda)^-1
        l = np.exp(-0.5 * np.sum(inp * t, 1))  # (N, 1)
        zet = 2 * np.log(alpha) - 0.5 * np.sum(inp * inp, 1)
        inp = inp.dot(iLam1)
        R = 2 * iLam2 + eye_d
        t = 1 / np.sqrt(det(R))
        L = np.exp((zet[:, na] + zet[:, na].T) +
                   maha(inp, -inp, V=0.5 * inv(R)))
        q = c * l  # evaluations of the kernel mean map (from the viewpoint of RKHS methods)
        # mean weights
        wm_f = q.dot(iK)
        iKQ = iK.dot(t * L)
        # covariance weights
        wc_f = iKQ.dot(iK)
        # cross-covariance "weights"
        wc_fx = np.diag(q).dot(iK)
        self.iK = iK
        # used for self.D.dot(x - mean).dot(wc_fx).dot(fx)
        self.D = inv(eye_d +
                     np.diag(el**2))  # S(S+Lam)^-1; for S=I, (I+Lam)^-1
        # model variance; to be added to the covariance
        # this diagonal form assumes independent GP outputs (cov(f^a, f^b) = 0 for all a, b: a neq b)
        self.model_var = np.diag((alpha**2 - np.trace(iKQ)) * np.ones((d, 1)))
        return wm_f, wc_f, wc_fx
Code example #42
    def standardized_forecasts_error(self):
        """
        Standardized forecast errors
        """
        if self._standardized_forecasts_error is None:
            from scipy import linalg
            self._standardized_forecasts_error = np.zeros(
                self.forecasts_error.shape, dtype=self.dtype)

            for t in range(self.forecasts_error_cov.shape[2]):
                upper, _ = linalg.cho_factor(self.forecasts_error_cov[:, :, t],
                                             check_finite=False)
                self._standardized_forecasts_error[:, t] = (
                    linalg.solve_triangular(upper, self.forecasts_error[:, t],
                                            check_finite=False))

        return self._standardized_forecasts_error
Code example #43
File: model.py Project: dfm/rotate
    def get_weights(self):
        log_lams = self.pld.get_parameter_vector()
        A = self.pld.A
        fsap = self.fsap
        gp = self.gp

        alpha = np.dot(A.T, gp.apply_inverse(fsap - gp.mean.value)[:, 0])
        ATKinvA = np.dot(A.T, gp.apply_inverse(A))
        S = np.array(ATKinvA)
        dids = np.diag_indices_from(S)
        for bid, (s, f) in enumerate(self.pld.block_inds):
            S[(dids[0][s:f], dids[1][s:f])] += np.exp(-log_lams[bid])
        factor = cho_factor(S, overwrite_a=True)
        alpha -= np.dot(ATKinvA, cho_solve(factor, alpha))
        for bid, (s, f) in enumerate(self.pld.block_inds):
            alpha[s:f] *= np.exp(log_lams[bid])
        return alpha
Code example #44
File: model_mpgln.py Project: ptrubey/projgamma
 def initialize_sampler(self, nSamp):
     self.samples = Samples(nSamp, self.nDat, self.nCol, self.nMix)
     self.curr_iter = 0
     self.samples.mu[0] = self.priors.mu.mu + self.priors.mu.SCho @ normal(
         size=self.nCol)
     self.samples.Sigma[0] = invwishart.rvs(df=self.priors.Sigma.nu,
                                            scale=self.priors.Sigma.psi)
     self.samples.pi[0] = 1 / self.nMix
     alpha_new, beta_new = self.sample_alpha_beta_new(
         self.curr_mu, cho_factor(self.curr_Sigma), self.nMix)
     self.samples.alpha[0] = alpha_new
     self.samples.beta[0] = beta_new
     self.samples.r[0] = 1.
     self.samples.delta[0] = self.sample_delta(self.curr_alpha,
                                               self.curr_beta, self.curr_r,
                                               self.curr_pi)
     return
Code example #45
File: linear.py Project: tigerneil/irspack
    def _learn(self) -> None:
        if self.fit_intercept:
            X_profile_local = enlarge_profile(self.X_profile)
        else:
            X_profile_local = self.X_profile

        if sps.issparse(X_profile_local):
            X_profile_local = X_profile_local.toarray()
        X_l = X_profile_local.T.dot(X_profile_local)
        index = np.arange(X_l.shape[0])
        X_l[index, index] += self.reg
        inv = np.zeros_like(X_l)
        inv[index, index] = 1
        C_, lower = linalg.cho_factor(X_l, overwrite_a=True)
        inv = linalg.cho_solve((C_, lower), inv, overwrite_b=True)
        self.W = inv.dot(self.X_interaction.T.dot(X_profile_local).T)
        self.W = self.W.reshape(self.W.shape, order="F")
Code example #46
    def _nlml_grad(log_par, kernel, fcn_obs, x_obs):
        # convert from log-par to par
        par = np.exp(log_par)

        num_data = x_obs.shape[1]
        K = kernel.eval(par, x_obs)  # (N, N)
        L = la.cho_factor(K)
        a = la.cho_solve(L, fcn_obs)  # (N, )
        a_out_a = np.outer(
            a, a.T)  # (N, N) sum of outer products of columns of a

        # negative marginal log-likelihood derivatives w.r.t. hyper-parameters
        dK_dTheta = kernel.der_par(par, x_obs)  # (N, N, num_par)
        # iK = la.solve(K, np.eye(num_data))
        # return 0.5 * np.trace((iK - a_out_a).dot(dK_dTheta))  # (num_par, )
        iKdK = la.cho_solve(L, dK_dTheta)
        return 0.5 * np.trace((iKdK - a_out_a.dot(dK_dTheta)))  # (num_par, )
Code example #47
def norm_cov_sampling(mu, cov, nsamples=1000000):
    """
    Numerical estimate of normalized covariance from samples.

    """
    K = cov.shape[0]
    u = np.random.randn(K, nsamples)
    L = np.tril(cho_factor(cov, lower=True)[0])

    # Draw a bunch of samples
    x = mu + L @ u

    # Normalize each one to its mean
    xnorm = x / np.mean(x, axis=0).reshape(1, -1)

    # Compute the sample covariance
    return np.cov(xnorm)
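As in the stable_cho_factor example above, the np.tril mask is what makes the raw cho_factor output safe to use as a sampling matrix: scipy only guarantees the requested triangle and leaves arbitrary values in the other, whereas numpy.linalg.cholesky would return an already-clean lower triangle.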
Code example #48
def cloud_logprior(times,
                   hpmap_cube,
                   mu,
                   sigma,
                   lambda_time,
                   lambda_spatial,
                   nest=False):
    """Returns the GP prior on the time-varying map with exponential covariance
    function.

    :param hpmap_cube: Data cube containing a time-varying Healpix map, with
                        time along the first axis, on which the prior is to be
                        evaluated.

    :param mu: Mean of the GP

    :param sigma: Standard deviation at zero time and angular separation.

    :param lambda_time: Temporal correlation scale.

    :param lambda_spatial: Spatial correlation scale.

    :param nest: The ordering of the healpix map.

    """

    nside = hp.npix2nside(hpmap_cube.shape[1])
    n = np.prod(hpmap_cube.shape)

    cov = sigma * sigma * gaussian_cov(
        times, nside, lambda_spatial, lambda_time, nest=nest)
    cho_factor, lower = sl.cho_factor(cov)

    # Convert to GP parameter and calculate Jacobian
    gp_data = np.log(hpmap_cube) - np.log(1 - hpmap_cube)

    x = np.array(gp_data - mu).flatten()
    jacobian = np.sum(-np.log(hpmap_cube * (1. - hpmap_cube)))

    logdet = np.sum(np.log(np.diag(cho_factor)))

    lnprior = -0.5*n*np.log(2.0*np.pi) - logdet + jacobian -\
        0.5*np.dot(x, sl.cho_solve((cho_factor, lower), x))

    return lnprior
Code example #49
    def __init__(self, At, b, c, K, P, options):
        # Problem statement components
        self.At = At
        self.b = b
        self.c = c
        self.K = K
        self.P = P

        # Useful constants
        self.rho = options["rho"]
        self.sigma = options["sigma"]
        self.lamb = options["lamb"]

        # Lagrange multiplier vectors to be updated at each cycle
        self.zeta = np.ones(shape=(self.b.shape[0], 1))
        self.eta = np.ones(shape=(self.c.shape[0], 1))

        # Initialise local s vector containing extracted components of the full y vector
        self.s = np.zeros(shape=(self.b.shape[0], 1))

        # Initialise local z cost vector for ensuring problem remains conic
        zeroCones = np.zeros(shape=(K["f"], 1))  # Equality: 0s
        nnOrthants = np.ones(shape=(K["l"], 1))  # Inequality: 1s
        PSDs = [np.identity(size).reshape((size**2, 1))
                for size in K["s"]]  # PSD: identity
        self.z = vstack([zeroCones, nnOrthants,
                         *PSDs])  # Stack up the vectors for constraints

        # Generate matrix: L = rho_i*Pt*P (static if rho kept constant)
        self.L = self.rho * self.P.transpose() * self.P
        # Generate matrix: R = rho * I + sigma * A * At (static if rho and sigma kept constant), and its inverse
        # TODO: Use choleskyAAt
        self.R = csc_matrix(self.rho * identity(len(self.s)) +
                            self.sigma * self.At.transpose() * self.At)
        # self.Rinv = scipy.sparse.linalg.inv(self.R)  # To be removed, since will be solved with cholesky
        # Replace inverse computation with cholesky factorisation (lower triangular matrix)
        self.R_chol, self.low = cho_factor(self.R.todense())

        self.yUpdateVector = self.P.transpose() * (
            self.zeta + self.rho * self.s)  # Initialise first y updating value

        # Initialise initial primary and dual residuals
        self.primaryResidual = np.linalg.norm(self.c - self.At * self.s -
                                              self.z)
        self.dualResidual = float("inf")
Code example #50
    def _calculate_log_likelihood(self):
        """Calculates the log-likelihood (up to a constant) for a given
        self.theta.
        """
        R = zeros((self.n, self.n))
        X, Y = self.X, self.Y
        thetas = power(10., self.thetas)

        # exponentially weighted distance formula
        for i in range(self.n):
            R[i, i + 1:self.n] = exp(
                -thetas.dot(square(X[i, ...] - X[i + 1:self.n, ...]).T))

        R *= (1.0 - self.nugget)
        R += R.T + eye(self.n)
        self.R = R

        one = ones(self.n)
        rhs = column_stack([Y, one])
        try:
            # Cholesky Decomposition
            self.R_fact = cho_factor(R)
            sol = cho_solve(self.R_fact, rhs)
            solve = lambda x: cho_solve(self.R_fact, x)
            det_factor = log(abs(prod(diagonal(self.R_fact[0]))**2) + 1.e-16)

        except (linalg.LinAlgError, ValueError):
            # Since Cholesky failed, try linear least squares
            self.R_fact = None  # reset this to none, so we know not to use Cholesky
            sol = lstsq(self.R, rhs)[0]
            solve = lambda x: lstsq(self.R, x)[0]
            det_factor = slogdet(self.R)[1]

        self.mu = dot(one, sol[:, :-1]) / dot(one, sol[:, -1])
        y_minus_mu = Y - self.mu
        self.R_solve_ymu = solve(y_minus_mu)
        self.R_solve_one = sol[:, -1]
        self.sig2 = dot(y_minus_mu.T, self.R_solve_ymu) / self.n

        if isinstance(self.sig2, ndarray):
            self.log_likelihood = -self.n / 2. * slogdet(self.sig2)[1] \
                                  - 1. / 2. * det_factor
        else:
            self.log_likelihood = -self.n / 2. * log(self.sig2) \
                                  - 1. / 2. * det_factor
Code example #51
File: likelihood.py Project: zhexingli/radvel
    def logprob(self):
        """
        Return GP log-likelihood given the data and model.

        log-likelihood is computed using Cholesky decomposition as:

        .. math::

           lnL = -0.5r^TK^{-1}r - 0.5ln[det(K)] - 0.5N*ln(2pi)

        where r = vector of residuals (GPLikelihood._resids),
        K = covariance matrix, and N = number of datapoints.

        Priors are not applied here.
        Constant has been omitted.

        Returns:
            float: Natural log of likelihood

        """
        # update the Kernel object hyperparameter values
        self.update_kernel_params()

        r = self._resids()

        self.kernel.compute_covmatrix(self.errorbars())

        K = self.kernel.covmatrix

        # solve alpha = inverse(K)*r
        try:
            alpha = cho_solve(cho_factor(K), r)

            # compute determinant of K
            (s, d) = np.linalg.slogdet(K)

            # calculate likelihood
            like = -.5 * (np.dot(r, alpha) + d + self.N * np.log(2. * np.pi))

            return like

        except (np.linalg.linalg.LinAlgError, ValueError):
            warnings.warn("Non-positive definite kernel detected.",
                          RuntimeWarning)
            return -np.inf
Code example #52
File: population.py Project: psarkis/exopop
    def lnprior(self, theta, heights):
        if not -20 < theta[0] < 20:
            return -np.inf, None
        if not -2 < theta[1] < 9:
            return -np.inf, None
        if np.any((theta[2:self.ndim] < -2) + (theta[2:self.ndim] > 6)):
            return -np.inf, None

        y = heights - theta[0]
        K = self.get_matrix(theta)

        factor, flag = cho_factor(K)
        logdet = np.sum(2 * np.log(np.diag(factor)))
        lp = -0.5 * (np.dot(y, cho_solve((factor, flag), y)) + logdet)

        if not np.isfinite(lp):
            return -np.inf, K
        return lp, K
Code example #53
 def build_precomputed_data(self):
     if self.num_sampled == 0:
         self.K_chol = numpy.array([])
         self.K_inv_y = numpy.array([])
     else:
         if self.tikhonov_param is not None:
             noise_diag_vector = numpy.full(self.num_sampled,
                                            self.tikhonov_param)
         else:
             noise_diag_vector = self.points_sampled_noise_variance
         kernel_matrix = self.covariance.build_kernel_matrix(
             self.points_sampled,
             noise_variance=noise_diag_vector,
         )
         self.K_chol = cho_factor(kernel_matrix,
                                  lower=True,
                                  overwrite_a=True)
         self.K_inv_y = cho_solve(self.K_chol, self.points_sampled_value)
Code example #54
    def Pars(self, p=None):
        """
    Simple function to return or set pars. Required as _pars is semi-private, and does not
    compute cho factor if set directly, eg MyGP._pars = [blah], plus should be a np.array.
    
    """

        if p is None:
            return np.copy(self._pars)
        else:
            self._pars = np.array(p)
            self.hp_hash[self.si] = hash(np.array(p[:self.n_hp]).tobytes())
            self.ChoFactor[self.si] = LA.cho_factor(
                GPC.CovarianceMatrixMult(self._pars[:self.n_hp], self.kf_args,
                                         self.kf, self.mf,
                                         self._pars[self.n_hp:], self.mf_args))
            self.logdetK[self.si] = 2 * np.log(np.diag(
                self.ChoFactor[self.si][0])).sum()
Code example #55
    def makeAMatrix(self):
        """
        Calculates the "A" matrix, that uses the existing data to find a new 
        component of the new phase vector.
        """
        # Cholesky solve can fail - if so do brute force inversion
        try:
            cf = linalg.cho_factor(self.cov_mat_zz)
            inv_cov_zz = linalg.cho_solve(
                cf, numpy.identity(self.cov_mat_zz.shape[0]))
        except linalg.LinAlgError:
            # print("Cholesky solve failed. Performing SVD inversion...")
            # inv_cov_zz = numpy.linalg.pinv(self.cov_mat_zz)
            raise linalg.LinAlgError(
                "Could not invert Covariance Matrix to form A and B Matrices. Try with a larger pixel scale or smaller L0"
            )

        self.A_mat = self.cov_mat_xz.dot(inv_cov_zz)
Code example #56
 def integrate_step(self, t, x, u):
     ''' state vector x is given by x = [c, v, q, w] where
     c: center of mass cartesian position
     v: center of mass linear velocity
     q: base orientation represented as a quaternion
     w: base angular velocity
      '''
     cnext = x[:3] + self.dt * x[3:6]
     vnext = x[3:6] + (self.dt / self.mass) * (
         self.active_contacts[t] * u[:3] - self.weight)
     qnext = self.integrate_quaternion(x[6:10], self.dt * x[10:13])
     R = self.quaternion_to_rotation(x[6:10])
     factor = linalg.cho_factor(R.dot(self.inertia_com_frame).dot(R.T))
     wnext = x[10:13] + self.dt * linalg.cho_solve(
         factor, self.active_contacts[t] * u[3:] -
         np.cross(x[10:13],
                  np.dot(R.dot(self.inertia_com_frame).dot(R.T), x[10:13])))
     return np.hstack([cnext, vnext, qnext, wnext])
Code example #57
File: emulator.py Project: spencerhurt/Starfish
    def log_likelihood(self) -> float:
        """
        Get the log likelihood of the emulator in its current state as calculated in
        the appendix of Czekala et al. (2015)

        Returns
        -------
        float

        Raises
        ------
        scipy.linalg.LinAlgError
            If the Cholesky factorization fails
        """
        L, flag = cho_factor(self.v11)
        logdet = 2 * np.sum(np.log(np.diag(L)))
        sqmah = self.w_hat @ cho_solve((L, flag), self.w_hat)
        return -(logdet + sqmah) / 2
Code example #58
def eval_quad(x: np.ndarray, s: np.ndarray) -> np.ndarray:
    """Evaluate the quadratic form x[i].T @ inv(s) @ x[i] for each row i in x.

    :param x:
    :param s: inverse scaling matrix. if 1-dimensional, assume diagonal matrix
    :returns: evaluated quadratic forms

    >>> np.random.seed(666)
    >>> x = np.random.standard_normal((3, 2))
    >>> s = np.diag(np.random.standard_normal(2) ** 2)
    >>> eval_quad(x, s)
    array([1876.35129871, 3804.08373042,  902.76990678])
    """

    l, _ = cho_factor(s, lower=True)
    root = solve_triangular(l, x.T, lower=True).T

    return np.sum(root**2, 1)
Code example #59
 def initialize_sampler(self, nSamp):
     self.samples = Samples(nSamp, self.nDat, self.nCol)
     self.curr_iter = 0
     self.samples.mu[0] = self.priors.mu.mu + self.priors.mu.SCho @ normal(
         size=self.nCol)
     self.samples.Sigma[0] = invwishart.rvs(
         df=self.priors.Sigma.nu,
         scale=self.priors.Sigma.psi,
     )
     self.samples.alpha[0] = self.sample_alpha_new(
         self.curr_mu,
         cho_factor(self.curr_Sigma),
         self.nDat,
     )
     self.samples.delta[0] = range(self.nDat)
     self.samples.r[0] = self.sample_r(self.curr_alphas, self.curr_delta)
     self.samples.eta[0] = 5.
     return
Code example #60
    def _total_nlml(log_par, kernel, fcn_obs, x_obs):
        # N - # points, E - # function outputs
        # fcn_obs (N, E), hypers (num_hyp, )

        # convert from log-par to par
        par = np.exp(log_par)
        num_data = x_obs.shape[1]
        num_out = fcn_obs.shape[1]

        K = kernel.eval(par, x_obs)  # (N, N)
        L = la.cho_factor(K)  # jitter included from eval
        a = la.cho_solve(L, fcn_obs)  # (N, E)
        y_dot_a = np.einsum('ij, ji', fcn_obs.T,
                            a)  # sum of diagonal of A.T.dot(A)

        # negative marginal log-likelihood
        return num_out * np.sum(np.log(np.diag(
            L[0]))) + 0.5 * (y_dot_a + num_out * num_data * np.log(2 * np.pi))