Example #1
    def initialize_params(self):
        """ 
        Initialize the params.Sigma_s/Sigma_i, 
        params.J_i/J_s, params.logdet_Sigma_i/logdet_Sigma_s,
        based on i_std and s_std

        """
        params = self.params
        dim_s = self.dim_s
        dim_i = self.dim_i
        s_std = self.s_std
        i_std = self.i_std
        nSuperpixels = self.nSuperpixels

        # Covariance for each superpixel is a diagonal matrix
        for i in range(dim_s):
            params.Sigma_s.cpu[:,i,i].fill(s_std**2)  
            params.prior_sigma_s_sum.cpu[:,i,i].fill(s_std**4) 

        for i in range(dim_i):
            params.Sigma_i.cpu[:,i,i].fill((i_std)**2)
        params.Sigma_i.cpu[:,1,1].fill((i_std/2)**2) # To account for scale differences between the L,A,B

        # calculate the inverse of the covariance
        params.J_i.cpu[:] = list(map(inv, params.Sigma_i.cpu))
        params.J_s.cpu[:] = list(map(inv, params.Sigma_s.cpu))

        # calculate the log of the determinant of the covariance
        for i in range(nSuperpixels):
            junk,params.logdet_Sigma_i.cpu[i] = slogdet(params.Sigma_i.cpu[i])
            junk,params.logdet_Sigma_s.cpu[i] = slogdet(params.Sigma_s.cpu[i])
        del junk

        self.update_params_cpu2gpu()
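A minimal standalone sketch of the relationship the initialization above relies on (the values of s_std and dim_s here are made up): for a diagonal covariance built from a single standard deviation, the log-determinant is 2·dim·log(std), and the precision matrix has the negated log-determinant.

import numpy as np
from numpy.linalg import slogdet, inv

s_std, dim_s = 0.5, 2
Sigma_s = np.diag(np.full(dim_s, s_std**2))   # diagonal covariance, as above
J_s = inv(Sigma_s)                            # precision (inverse covariance)
assert np.isclose(slogdet(Sigma_s)[1], 2 * dim_s * np.log(s_std))
assert np.isclose(slogdet(J_s)[1], -slogdet(Sigma_s)[1])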
Example #2
def drawBeta(k,s,w,size=1):
    """Draw beta from its distribution (Eq.9 Rasmussen 2000) using ARS
    Make it robust with an expanding range in case of failure"""
    nd = w.shape[0]
    
    # precompute some things for speed
    logdetw = slogdet(w)[1]
    temp = 0
    for sj in s:
        sj = np.reshape(sj,(nd,nd))
        temp += slogdet(sj)[1]
        temp -= np.trace(np.dot(w,sj))
    
    lb = nd - 1.0
    flag = True
    cnt = 0
    while flag:
        xi = lb + np.logspace(-3-cnt,1+cnt,200)       # update range if needed
        flag = False
        try:
            ars = ARS(logpbeta,logpbetaprime,xi=xi,lb=lb,ub=np.inf, \
                k=k, s=s, w=w, nd=nd, logdetw=logdetw, temp=temp)
        except Exception:  # ARS setup failed; widen the range and retry
            cnt += 1
            flag = True

    # draw beta but also pass random seed to ARS code
    return ars.draw(size,np.random.randint(MAXINT))
Example #3
def logdet_low_rank(Ainv, U, C, V, diag=False):
    """

    logdet(A+UCV) = logdet(C^{-1} + V A^{-1} U) +  logdet(C) + logdet(A).

    :param Ainv: NxN
    :param U: NxK
    :param C: KxK
    :param V: KxN
    :return:
    """
    Cinv = inv(C)
    sC, ldC = slogdet(C)
    assert sC > 0

    if diag:
        ldA = -log(Ainv).sum()

        tmp1 = einsum('ij,j,jk->ik', V, Ainv, U)
        s1, ld1 = slogdet(Cinv + tmp1)
        assert s1 > 0

    else:
        sAinv, ldAinv = slogdet(Ainv)
        ldA = -ldAinv
        assert sAinv > 0

        s1, ld1 = slogdet(Cinv + V.dot(Ainv).dot(U))
        assert s1 > 0

    return  ld1 + ldC + ldA
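A hedged numerical check of the identity in the docstring, calling the logdet_low_rank defined above; the sizes and the positive-definite construction of A are arbitrary test choices.

import numpy as np
from numpy import log, einsum
from numpy.linalg import slogdet, inv

rng = np.random.default_rng(0)
N, K = 6, 2
A = np.diag(rng.uniform(1.0, 2.0, N))   # positive-definite A
U = rng.normal(size=(N, K))
C = np.eye(K)
V = U.T
direct = slogdet(A + U @ C @ V)[1]
assert np.isclose(logdet_low_rank(inv(A), U, C, V), direct)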
Example #4
    def loglike(self, params):
        """
        Returns float
        Loglikelihood used in latent factor models

        Parameters
        ----------
        params : list
            Values of parameters to pass into masked elements of array

        Returns
        -------
        loglikelihood : float
        """

        latent = self.latent
        per = self.periods
        var_data_vert = self.var_data_vert
        var_data_vertm1 = self.var_data_vertm1

        lam_0, lam_1, delta_0, delta_1, mu, phi, \
            sigma, dtype = self.params_to_array(params)

        if self.fast_gen_pred:
            a_solve, b_solve = self.opt_gen_pred_coef(lam_0, lam_1, delta_0,
                                                      delta_1, mu, phi, sigma,
                                                      dtype)

        else:
            a_solve, b_solve = self.gen_pred_coef(lam_0, lam_1, delta_0,
                                                  delta_1, mu, phi, sigma,
                                                  dtype)
        # first solve for unknown part of information vector
        lat_ser, jacob, yield_errs  = self._solve_unobs(a_in=a_solve,
                                                        b_in=b_solve,
                                                        dtype=dtype)

        # here is the likelihood that needs to be used
        # use two matrices to take the difference
        var_data_use = var_data_vert.join(lat_ser)[1:]
        var_data_usem1 = var_data_vertm1.join(lat_ser.shift())[1:]

        errors = var_data_use.values.T - mu - np.dot(phi,
                                                     var_data_usem1.values.T)
        sign, j_logdt = nla.slogdet(jacob)
        j_slogdt = sign * j_logdt

        sign, sigma_logdt = nla.slogdet(np.dot(sigma, sigma.T))
        sigma_slogdt = sign * sigma_logdt

        var_yields_errs = np.var(yield_errs, axis=1)

        like = -(per - 1) * j_slogdt - (per - 1) * 1.0 / 2 * sigma_slogdt - \
               1.0 / 2 * np.sum(np.dot(np.dot(errors.T, \
               la.inv(np.dot(sigma, sigma.T))), errors)) - (per - 1) / 2.0 * \
               np.log(np.sum(var_yields_errs)) - 1.0 / 2 * \
               np.sum(yield_errs**2/var_yields_errs[None].T)

        return like
Example #5
def logdet_low_rank2(Ainv, U, C, V, diag=False):
    '''
    computes logdet(A+UCV) using https://en.wikipedia.org/wiki/Matrix_determinant_lemma
    '''
    if diag:
        ldA = -log(Ainv).sum()
        temp = C.dot(V).dot(U * Ainv[:,None])
    else:
        ldA = -slogdet(Ainv)[1]
        temp = C.dot(V).dot(Ainv).dot(U)
    temp.flat[::temp.shape[0]+1] += 1
    return slogdet(temp)[1] + ldA
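The same kind of hedged check for the diag=True branch above, where Ainv is passed as the 1-D diagonal of A⁻¹ (test sizes are arbitrary).

import numpy as np
from numpy import log
from numpy.linalg import slogdet

rng = np.random.default_rng(1)
N, K = 5, 2
a = rng.uniform(1.0, 2.0, N)             # diagonal of A
U = rng.normal(size=(N, K))
C = np.eye(K)
V = U.T
direct = slogdet(np.diag(a) + U @ C @ V)[1]
assert np.isclose(logdet_low_rank2(1.0 / a, U, C, V, diag=True), direct)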
Example #6
	def train_sgd(self, X, **kwargs):
		# hyperparameters
		max_iter = kwargs.get('max_iter', 1)
		batch_size = kwargs.get('batch_size', min([100, X.shape[1]]))
		step_width = kwargs.get('step_width', 0.001)
		momentum = kwargs.get('momentum', 0.9)
		shuffle = kwargs.get('shuffle', True)
		pocket = kwargs.get('pocket', shuffle)

		# completed basis and filters
		A = self.A
		W = inv(A)

		# initial direction of momentum
		P = 0.

		if pocket:
			energy = mean(self.prior_energy(dot(W, X))) - slogdet(W)[1]

		for j in range(max_iter):
			if shuffle:
				# randomize order of data
				X = X[:, permutation(X.shape[1])]

			for i in range(0, X.shape[1], batch_size):
				batch = X[:, i:i + batch_size]

				if not batch.shape[1] < batch_size:
					# calculate gradient
					P = momentum * P + A.T - \
						dot(self.prior_energy_gradient(dot(W, batch)), batch.T) / batch_size

					# update parameters
					W += step_width * P
					A = inv(W)

		if pocket:
			# test for improvement of lower bound
			if mean(self.prior_energy(dot(W, X))) - slogdet(W)[1] > energy:
				if Distribution.VERBOSITY > 0:
				print('No improvement.')

				# don't update parameters
				return False

		# update linear features
		self.A = A

		return True
Example #7
def mvnkld(mu0, mu1, sigma0, sigma1):
    """

    Returns the Kullback-Leibler Divergence (KLD) between two normal distributions.

    """
    k = len(mu0)
    assert k == len(mu1)
    delta = mu1 - mu0
    (sign0, logdet0) = linalg.slogdet(sigma0)
    (sign1, logdet1) = linalg.slogdet(sigma1)
    lndet = logdet0 - logdet1
    A = trace(linalg.solve(sigma1, sigma0))
    B = delta.T.dot(linalg.solve(sigma1, delta))
    return 0.5 * (A + B - k - lndet)
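A small usage sketch for mvnkld above, assuming trace and linalg are imported from NumPy as the function expects (the test values are arbitrary): the divergence of a distribution from itself is zero, and the KLD is not symmetric in its arguments.

import numpy as np
from numpy import trace, linalg

mu0, mu1 = np.zeros(2), np.array([1.0, 0.0])
sig0, sig1 = np.eye(2), 2.0 * np.eye(2)
assert np.isclose(mvnkld(mu0, mu0, sig0, sig0), 0.0)     # KL(N_0 || N_0) = 0
assert not np.isclose(mvnkld(mu0, mu1, sig0, sig1),
                      mvnkld(mu1, mu0, sig1, sig0))      # not symmetric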
Example #8
def clik(lam,n,n2,n_eq,bigE,I,WS):
    """ Concentrated (negative) log-likelihood for SUR Error model
    
    Parameters
    ----------
    lam         : n_eq x 1 array of spatial autoregressive parameters
    n           : number of observations in each cross-section
    n2          : n/2
    n_eq        : number of equations
    bigE        : n by n_eq matrix with vectors of residuals for 
                  each equation
    I           : sparse Identity matrix
    WS          : sparse spatial weights matrix
    
    Returns
    -------
    -clik       : negative (for minimize) of the concentrated
                  log-likelihood function
    
    """
    WbigE = WS * bigE
    spfbigE = bigE - WbigE * lam.T
    sig = np.dot(spfbigE.T,spfbigE) / n
    ldet = la.slogdet(sig)[1]
    logjac = jacob(lam,n_eq,I,WS)
    clik = - n2 * ldet + logjac
    return -clik  # negative for minimize
Example #9
def mvnlogpdf_p (X, mu, PrecMat):
    """
    Multivariate Normal Log PDF

    Args:
        X      : NxD matrix of input data. Each ROW is a single sample.
        mu     : Dx1 vector for the mean.
        PrecMat: DxD precision matrix.

    Returns:
        Nx1 vector of log probabilities.
    """
    D = PrecMat.shape[0]
    X = _check_single_data(X, D)
    N = len(X)

    _, neglogdet = linalg.slogdet(PrecMat)
    normconst = -0.5 * (D * np.log(2 * constants.pi) - neglogdet)

    logpdf = np.empty((N, 1))
    for n, x in enumerate(X):
        d = x - mu
        logpdf[n] = normconst - 0.5 * d.dot(PrecMat.dot(d))

    return logpdf
Example #10
    def addItem(self, i, newf, oldf, newt, oldt, followset, s, ss):    
             
        if newt == -1:
            newt = self.K
            self.K += 1
            self.cholesky[newt] = cholesky(self.con.lambda0 + self.con.kappa0_outermu0).T
            
        n = len(followset)
        self.counts[newt] += n
        assert self.counts[oldt] >= 0
        assert self.counts[newt] > 0
        self.s[newt] += s
        self.ss[newt] += ss
        #self.denom[newt] = self.integrateOverParameters(self.counts[newt], self.s[newt], self.ss[newt])

        n = self.counts[newt]
        kappan = self.con.kappa0 + n
        mun = (self.con.kappa0 * self.con.mu0 + self.s[newt]) / kappan
        lambdan = self.con.lambda0 + self.ss[newt] + self.con.kappa0_outermu0 - kappan * np.outer(mun, mun)
        self.logdet[newt] = slogdet(lambdan)[1]
        #for ind in followset:
        #    cholupdate(self.cholesky[newt], self.data[ind].copy())
            
        for j in followset:
            self.assignments[j] = newt
        self.follow[i] = newf
        self.sit_behind[newf].add(i)
Example #11
 def preprocess(self):
     self.VarList = tuple(self.Vars)
     self.NumVars = len(self.VarList)
     self.VarVector = BlockMatrix((tuple(self.Vars[var] for var in self.VarList),))
     self.NumDims = self.VarVector.shape[1]
     self.Mean = BlockMatrix((tuple(self.Param[('Mean', var)] for var in self.VarList),))
     self.DemeanedVarVector = self.VarVector - self.Mean
     cov = [self.NumVars * [None] for _ in range(self.NumVars)]   # careful not to create same mutable object
     for i in range(self.NumVars):
         for j in range(i):
             if ('Cov', self.VarList[i], self.VarList[j]) in self.Param:
                 cov[i][j] = self.Param[('Cov', self.VarList[i], self.VarList[j])]
                 cov[j][i] = cov[i][j].T
             else:
                 cov[j][i] = self.Param[('Cov', self.VarList[j], self.VarList[i])]
                 cov[i][j] = cov[j][i].T
         cov[i][i] = self.Param[('Cov', self.VarList[i])]
     self.Cov = BlockMatrix(cov)
     try:
         cov = CompyledFunc(var_names_and_syms={}, dict_or_expr=self.Cov)()
         sign, self.LogDetCov = slogdet(cov)
         self.LogDetCov *= sign
         self.InvCov = inv(cov)
     except:
         pass
     self.PreProcessed = True
Example #12
 def __init__(self, d, nu, mu, Lambda):
     self.nu = nu
     self.d = d
     self.mu = mu
     self.precision = inv(Lambda)
     self.logdet = slogdet(Lambda)[1]
     self.Z = gammaln(nu / 2) + d / 2 * (math.log(nu) + math.log(math.pi)) - gammaln((nu + d) / 2)
Example #13
    def _loglike_mle(self, params):
        """
        Loglikelihood of AR(p) process using exact maximum likelihood
        """
        nobs = self.nobs
        X = self.X
        endog = self.endog
        k_ar = self.k_ar
        k_trend = self.k_trend

        # reparameterize according to Jones (1980) like in ARMA/Kalman Filter
        if self.transparams:
            params = self._transparams(params)

        # get mean and variance for pre-sample lags
        yp = endog[:k_ar].copy()
        if k_trend:
            c = [params[0]] * k_ar
        else:
            c = [0]
        mup = np.asarray(c / (1 - np.sum(params[k_trend:])))
        diffp = yp - mup[:, None]

        # get inv(Vp) Hamilton 5.3.7
        Vpinv = self._presample_varcov(params)

        diffpVpinv = np.dot(np.dot(diffp.T, Vpinv), diffp).item()
        ssr = sumofsq(endog[k_ar:].squeeze() - np.dot(X, params))

        # concentrating the likelihood means that sigma2 is given by
        sigma2 = 1.0 / nobs * (diffpVpinv + ssr)
        self.sigma2 = sigma2
        logdet = slogdet(Vpinv)[1]  # TODO: add check for singularity
        loglike = -1 / 2.0 * (nobs * (np.log(2 * np.pi) + np.log(sigma2)) - logdet + diffpVpinv / sigma2 + ssr / sigma2)
        return loglike
Example #14
def calcNumericFit(xVal, yVal, yErr):
    a11 = 0
    a21 = 0
    a12 = 0
    a22 = 0

    # Compute the coefficient matrix
    for i in range(len(xVal)):
        a22 = a22 + 1/(yErr[i]**2)
        a12 = a12 + xVal[i]/yErr[i]**2
        a21 = a21 + xVal[i]/yErr[i]**2
        a11 = a11 + xVal[i]**2/yErr[i]**2
    (sign, logdet) = linalg.slogdet([[a11, a21], [a12, a22]]) 
    detCoeffMat = sign * np.exp(logdet)

    xy = 0
    xyxy = 0
    y0 = 0
    # Compute the coefficients
    for i in range(len(xVal)):
        xy = xy + xVal[i]*yVal[i]/(yErr[i]**2)
        xyxy = xyxy + xVal[i]*yVal[i]/yErr[i]**4
        y0 = y0 + yVal[i]/(yErr[i]**2)

    aBest = (1/detCoeffMat) * (xy * a22 - a21 * y0)
    bBest = (1/detCoeffMat) * (a11 * y0 - a21 * xy)

    # Compute the uncertainties
    aErr = np.sqrt(1/detCoeffMat * a22)
    bErr = np.sqrt(1/detCoeffMat * a11)
    return [aBest, bBest, aErr, bErr]
Example #15
def mvnlogpdf (X, mu, Sigma):
    """
    Multivariate Normal Log PDF

    Args:
        X    : NxD matrix of input data. Each ROW is a single sample.
        mu   : Dx1 vector for the mean.
        Sigma: DxD covariance matrix.

    Returns:
        Nx1 vector of log probabilities.
    """
    D = Sigma.shape[0]
    X = _check_single_data(X, D)
    N = len(X)

    _, logdet = linalg.slogdet(Sigma)
    normconst = -0.5 * (D * np.log(2 * constants.pi) + logdet)

    iS = linalg.inv(Sigma)
    logpdf = np.empty((N, 1))
    for n, x in enumerate(X):
        d = x - mu
        logpdf[n] = normconst - 0.5 * d.dot(iS.dot(d))

    return logpdf
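Since _check_single_data is not shown, here is a self-contained sketch of the same slogdet-based Gaussian log-density for a single point, cross-checked against scipy.stats.multivariate_normal (the test data are arbitrary).

import numpy as np
from numpy.linalg import slogdet, inv
from scipy.stats import multivariate_normal

rng = np.random.default_rng(2)
D = 3
mu = rng.normal(size=D)
A = rng.normal(size=(D, D))
Sigma = A @ A.T + D * np.eye(D)          # positive-definite covariance
x = rng.normal(size=D)

d = x - mu
logpdf = -0.5 * (D * np.log(2 * np.pi) + slogdet(Sigma)[1] + d @ inv(Sigma) @ d)
assert np.isclose(logpdf, multivariate_normal(mu, Sigma).logpdf(x))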
Example #16
    def lnprob_cov(C):

        # Get first term of loglikelihood expression (y * (1/C) * y.T)
        # Do computation using Cholesky decomposition
        try:
            
            U, luflag = cho_factor(C)
            
        except LinAlgError:

            # Matrix is not positive semi-definite, so replace it with the 
            #  positive semi-definite matrix that is nearest in the Frobenius norm

            E, EV = eigh(C)
            E[E<0] = 1e-12
            U, luflag = cho_factor(EV.dot(np.diag(E)).dot(EV.T))
            
        finally:

            x2 = cho_solve((U, luflag), dxy)
            L1 = dxy.dot(x2)

        # Get second term of loglikelihood expression (log det C)
        sign, L2 = slogdet(C)

        # Why am I always confused by this?
        thing_to_be_minimised = (L1 + L2)

        return thing_to_be_minimised
Example #17
	def logjacobian(self, data):
		"""
		Returns the log-determinant of the Jacobian matrix evaluated at the given
		data points.

		@type  data: array_like
		@param data: data points stored in columns

		@rtype: ndarray
		@return: the logarithm of the Jacobian determinants
		"""

		# completed filter matrix
		W = inv(self.ica.A)

		# determinant of linear transformation
		logjacobian = zeros([1, data.shape[1]]) + slogdet(W)[1]

		# linearly transform data
		data = dot(W, data)

		length = len(str(len(self.ica.marginals)))

		if Transform.VERBOSITY > 0:
			print(('{0:>' + str(length) + '}/{1}').format(0, len(self.ica.marginals)), end=' ')

		for i, mog in enumerate(self.ica.marginals):
			logjacobian += UnivariateGaussianization(mog).logjacobian(data[[i]])

			if Transform.VERBOSITY > 0:
				print((('\b' * (length * 2 + 2)) + '{0:>' + str(length) + '}/{1}').format(i + 1, len(self.ica.marginals)), end=' ')
		if Transform.VERBOSITY > 0:
			print()

		return logjacobian
Example #18
def posdef_diag_dom(n, m=10, s=None):
    """Generates a positive-definite, diagonally dominant n x n matrix.
    Arguments:
        n - width/height of the matrix
        m - additional multiplier for diagonal-dominance control
            (shouldn't be less than 10, though)
        s - optional seed for RNG
    """
    if m < 10:
        print "Multiplier should be >= 10. Using m=10 instead."
        m = 10
    np.random.seed(s)  # re-seeding RNG is needed for multiprocessing
    while True:
        signs = np.random.randint(2, size=(n, n))
        f = (signs == 0)
        signs[f] = -1
        signs = np.triu(signs, k=1)
        a = np.random.random((n, n))
        u = a * signs
        l = u.T
        a = l + u
        for i, row in enumerate(a):
            a[i, i] = (row * row).sum() * m
        if la.slogdet(a)[0] != 0:  # accept once the matrix is non-singular
            break
    return a
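A hedged usage sketch for posdef_diag_dom above (seed and size are arbitrary): the returned matrix should be symmetric with strictly positive eigenvalues, so slogdet reports a positive sign.

import numpy as np
from numpy import linalg as la

a = posdef_diag_dom(5, s=0)
assert np.allclose(a, a.T)                 # symmetric
assert la.slogdet(a)[0] == 1.0             # positive determinant
assert (la.eigvalsh(a) > 0).all()          # positive definite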
Example #19
	def nll(self, log_th, x_nd, y_n, grad=False, use_self_hyper=True):
		"""
		Returns the negative log-likelihood : -log[p(y|x,th)],
		where log_th are the LOG hyper-parameters.
			If log_th is None, then the self.{a,d,b,c} are used
			to compute the value and the gradient.

		@params:
			x_nd    : input vectors in R^d
			y_n     : output at the input vectors
			log_th  : vector of hyperparameters
			grad    : if TRUE, this function also returns
				      the partial derivatives of nll w.r.t
				      each (log) hyper-parameter.
		"""
		## make the data-points a 2D matrix:
		if x_nd.ndim==1:
			x_nd = np.atleast_2d(x_nd).T
		if y_n.ndim==1:
			y_n = np.atleast_2d(y_n).T

		if not use_self_hyper:
			self.set_log_hyperparam(log_th)

		log_th = np.squeeze(log_th)

		N,d = x_nd.shape
		assert len(y_n)==N, "x and y shape mismatch."

		K,K1,K2 = self.get_covmat(x_nd, get_both=True)

		## compute the determinant using LU factorization:
		sign, log_det = nla.slogdet(K)
		#assert sign > 0, "Periodic Cov : covariance matrix is not PSD."
		
		## compute the inverse of the covariance matrix through gaussian
		## elimination:
		K_inv = nla.solve(K, np.eye(N))
		Ki_y  = K_inv.dot(y_n)

		## negative log-likelihood:
		nloglik = 0.5*( N*self.log_2pi + log_det + y_n.T.dot(Ki_y))

		if grad:
			num_hyper = self.n1 + self.n2
			K_diff = K_inv - Ki_y.dot(Ki_y.T)
			dfX = np.zeros(num_hyper)

			dK1 = self.k1.get_dK_dth(log_th[:self.n1], x_nd, y_n, use_self_hyper)
			dK2 = self.k2.get_dk_dth(log_th[self.n1:], x_nd, y_n, use_self_hyper)

			for i in range(self.n1):
				dfX[i] = np.sum(np.sum( K_diff * dK1[i].dot(K2)))
			for i in range(self.n2):
				dfX[i+self.n1] = np.sum(np.sum( K_diff * K1.dot(dK2[i])))

			return nloglik, dfX

		return nloglik
Example #20
 def forward_jacobian_log_det(self, x):
     _, jld = np_la.slogdet(self.W)
     if x.ndim == 1:
         return jld
     elif x.ndim == 2:
         return x.shape[0] * jld
     else:
         raise ValueError('x must be one or two dimensional.')
Example #21
		def f(W, X):
			W = W.reshape(self.num_hiddens, self.num_hiddens)
			v = mean(self.prior_energy(dot(W, X))) - slogdet(W)[1]

			if weight_decay > 0.:
				v += weight_decay / 2. * sum(square(inv(W)))

			return v
Example #22
 def log_prior(self, i):
     """Return the probability of `X[i]` under the prior alone."""
     mu = self.prior.m_0
     covar = (self.prior.k_0 + 1) / (self.prior.k_0*(self.prior.v_0 - self.D + 1)) * self.prior.S_0
     logdet_covar = slogdet(covar)[1]
     inv_covar = inv(covar)
     v = self.prior.v_0 - self.D + 1
     return self._multivariate_students_t(i, mu, logdet_covar, inv_covar, v)
Example #23
def logmvstprob(x, mu, nu, d, Lambda):
    diff = x - mu[:,None]
    prob = gammaln((nu + d) / 2)
    prob -= gammaln(nu / 2)
    prob -= d / 2 * (math.log(nu) + math.log(math.pi))
    prob -= 0.5 * slogdet(Lambda)[1]
    prob -= (nu + d) / 2. * math.log(1 + 1. / nu * np.dot(np.dot(diff.T, inv(Lambda)), diff)[0][0])
    return prob
Example #24
def logZexp(r, Q):
    sd, logd = slogdet(Q)
    if sd != 1:
        raise LinAlgError('Q is not positive definite!')
    
    D = r.shape[0]
    
    return D / 2 * math.log(2 * math.pi) - logd / 2 + np.dot(r, solve(Q, r)) / 2
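The value returned by logZexp is consistent with the closed form of log ∫ exp(-xᵀQx/2 + rᵀx) dx; below is a hedged one-dimensional check against numerical quadrature (values arbitrary, assumes SciPy is available).

import math
import numpy as np
from numpy.linalg import slogdet, solve
from scipy.integrate import quad

r = np.array([0.7])
Q = np.array([[2.0]])
numeric = math.log(quad(lambda x: math.exp(-0.5 * Q[0, 0] * x**2 + r[0] * x),
                        -20, 20)[0])
assert np.isclose(logZexp(r, Q), numeric)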
Example #25
    def calc_log_z(_mu, _lambda, _kappa, _nu):
        d = len(_mu)
        sign, detr = slogdet(_lambda)
        log_z = (LOG2*(_nu*d/2.0)
                 + (d/2.0)*math.log(2*math.pi/_kappa)
                 + multigammaln(_nu/2, d) - (_nu/2.0)*detr)

        return log_z
Example #26
    def calc_log_z(_mu, _sigma, S):
        d = len(_mu)
        sign, detr = slogdet(_sigma)
        _sigma_inv = np.linalg.inv(_sigma)

        log_z = detr/2 + np.sum(_mu*np.dot(_sigma_inv, _mu))

        return log_z
Example #27
def KL_gaussian(mu0, sig0, mu1, sig1):
    """ KL(N_0 || N_1).
    """
    D = len(mu0)
    if D != len(mu1) or D != sig0.shape[0] or D != sig1.shape[0]:
        raise RuntimeError("Means and covariances my be the same dimension.")
    if sig0.shape[0] != sig0.shape[1] or sig1.shape[0] != sig1.shape[1]:
        raise RuntimeError("Covariance matrices must be square.")

    s1inv = npl.inv(sig1)
    s0_ld = npl.slogdet(sig0)[1]
    s1_ld = npl.slogdet(sig1)[1]
    x = mu1 - mu0
    tmp = np.trace(np.dot(s1inv, sig0)) + np.dot(x.T, np.dot(s1inv, x))
    tmp += -D - s0_ld + s1_ld

    return 0.5 * tmp
Example #28
    def _calculate_log_likelihood(self):
        """
        Calculates the log-likelihood (up to a constant) for a given
        self.theta.

        """
        R = zeros((self.n, self.n))
        X, Y = self.X, self.Y
        thetas = power(10., self.thetas)

        # exponentially weighted distance formula
        for i in range(self.n):
            R[i, i+1:self.n] = exp(-thetas.dot(square(X[i, ...] - X[i+1:self.n, ...]).T))

        R *= (1.0 - self.nugget)
        R += R.T + eye(self.n)
        self.R = R

        one = ones(self.n)
        rhs = column_stack([Y, one])
        try:
            # Cholesky Decomposition
            self.R_fact = cho_factor(R)
            sol = cho_solve(self.R_fact, rhs)
            solve = lambda x: cho_solve(self.R_fact, x)
            det_factor = log(abs(prod(diagonal(self.R_fact[0])) ** 2) + 1.e-16)

        except (linalg.LinAlgError, ValueError):
            # Since Cholesky failed, try linear least squares
            self.R_fact = None  # reset this to none, so we know not to use Cholesky
            sol = lstsq(self.R, rhs)[0]
            solve = lambda x: lstsq(self.R, x)[0]
            det_factor = slogdet(self.R)[1]

        self.mu = dot(one, sol[:, :-1]) / dot(one, sol[:, -1])
        y_minus_mu = Y - self.mu
        self.R_solve_ymu = solve(y_minus_mu)
        self.R_solve_one = sol[:, -1]
        self.sig2 = dot(y_minus_mu.T, self.R_solve_ymu) / self.n

        if isinstance(self.sig2, ndarray):
            self.log_likelihood = -self.n/2. * slogdet(self.sig2)[1] \
                                  - 1./2.*det_factor
        else:
            self.log_likelihood = -self.n/2. * log(self.sig2) \
                                  - 1./2.*det_factor
Example #29
 def __init__(self,bigy,bigX,iter=False,maxiter=5,epsilon=0.00001,verbose=False):
     # setting up the cross-products
     self.bigy = bigy
     self.bigX = bigX
     self.n_eq = len(bigy.keys())
     self.n = bigy[0].shape[0]
     self.bigK = np.zeros((self.n_eq,1),dtype=np.int_)
     for r in range(self.n_eq):
         self.bigK[r] = self.bigX[r].shape[1]
     self.bigXX,self.bigXy = sur_crossprod(self.bigX,self.bigy)
     # OLS regression by equation, sets up initial residuals
     _sur_ols(self) # creates self.bOLS and self.olsE
     # SUR estimation using OLS residuals - two step estimation
     self.bSUR,self.varb,self.sig = sur_est(self.bigXX,self.bigXy,self.olsE,self.bigK)
     resids = sur_resids(self.bigy,self.bigX,self.bSUR)  # matrix of residuals
     # Sigma and log det(Sigma) for null model
     self.sig_ols = self.sig
     sols = np.diag(np.diag(self.sig))
     self.ldetS0 = np.log(np.diag(sols)).sum()
     det0 = self.ldetS0
     # setup for iteration
     det1 = la.slogdet(self.sig)[1]
     self.ldetS1 = det1
     #self.niter = 0
     if iter:    # iterated FGLS aka ML
         n_iter = 0
         while np.abs(det1-det0) > epsilon and n_iter <= maxiter:
             n_iter += 1
             det0 = det1
             self.bSUR,self.varb,self.sig = sur_est(self.bigXX,self.bigXy,\
                       resids,self.bigK)
             resids = sur_resids(self.bigy,self.bigX,self.bSUR)
             det1 = la.slogdet(self.sig)[1]
             if verbose:
                 print(n_iter,det0,det1)
         self.bigE = sur_resids(self.bigy,self.bigX,self.bSUR)
         self.ldetS1 = det1
         self.niter = n_iter
     else:
         self.niter = 1
         self.bigE = resids
     self.bigYP = sur_predict(self.bigy,self.bigX,self.bSUR)  # LA added 10/30/16    
     self.corr = sur_corr(self.sig)
     lik = self.n_eq * (1.0 + np.log(2.0*np.pi)) + self.ldetS1
     self.llik = - (self.n / 2.0) * lik
Example #30
 def log_pdf(X, mu, C):
     n, d = X.shape
     inv = la.solve(C, (X - mu).T).T
     maha = np.einsum('ij,ij->i', (X - mu), inv)
     # Directly calculates log(det(C)), bypassing the numerical issues
     # of calculating the determinant of C, which can be very close to zero
     _, logdet = la.slogdet(C)
     log2pi = np.log(2 * np.pi)
     return -0.5 * (d * log2pi + logdet + maha)
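A short illustration of the numerical point made in the comment above (the size and scale are arbitrary): the plain determinant of a large, small-variance covariance underflows to zero, while slogdet stays finite.

import numpy as np
from numpy import linalg as la

C = 1e-3 * np.eye(400)
print(la.det(C))           # 0.0 -- (1e-3)**400 underflows, so log(det(C)) would be -inf
print(la.slogdet(C)[1])    # finite: 400 * log(1e-3) ~= -2763.1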
Example #31
    def _logdet_MM(self):
        if not self._restricted:
            return 0.0

        M = self._mean.AX
        ldet = slogdet(M.T @ M)
        if ldet[0] != 1.0:
            raise ValueError("The determinant of MрхђM should be positive.")
        return ldet[1]
Example #32
def callback_one(ip,d):
    pp = objective.inflate(ip)
    print()
    print('CALLBACK:')
    for term,ppp,dd in zip(terms,pp,d):
        M = ppp['M']
        M = (M+np.transpose(M))/2
        N = M.shape[0]
        s , lldet = slogdet(np.identity(N)-M)
        df = term.df(ppp,dd)
        dM = term.inflate(df)['M']
        ds , dldet = slogdet(np.identity(N)-M+0.001*dM)
        w,v = eig( np.identity(N) - M )
        print('eig M', w.real)
        print([term.f(ppp,dd)])
        print('Iteration s, ldet I-M: %d , %f     %d , %f     norm theta %f    norm M %f   barr %d' %
              (s, lldet, ds, dldet, norm(ppp['theta']), norm(M), term.barrier(ppp,dd)))
        print()
Example #33
        def llk(x):
            A = 10 ** x[0]
            l = 10 ** x[1]
            k = sqk(A, l)
            Ac, R, Q, V = buildmats(k)
            lk = -0.5 * Y.T.dot(Ksolve(Ac, R, Q, V ,Y)) - 0.5 * (slogdet(spl.toeplitz(Ac))[1]+slogdet(Q)[1])

            pr = -0.5 * x[0] ** 2 - 0.5 * x[1] ** 2
            return -lk - pr
Example #34
    def value(self, mean, cov):
        ym = self._y - mean
        Kiym = solve(cov, ym)

        (s, logdet) = slogdet(cov)
        assert s == 1.

        n = len(self._y)
        return -(logdet + ym.dot(Kiym) + n * log(2 * pi)) / 2
Example #35
 def energy(self):
     F = self.N * np.log(2 * np.pi * self.sigma2)
     log_det = np.prod(
         la.slogdet(self.C @ self.XtX + self.sigma2 * np.eye(self.D)))
     F += log_det
     F += (la.norm(self.y)**2 -
           self.Xty.T @ self.Sigma_W @ self.Xty) / self.sigma2
     self.F = F / 2
     return self.F
Example #36
 def __init__(self,bigy,bigX,iter=False,maxiter=5,epsilon=0.00001,verbose=False):
     # setting up the cross-products
     self.bigy = bigy
     self.bigX = bigX
     self.n_eq = len(bigy.keys())
     self.n = bigy[0].shape[0]
     self.bigK = np.zeros((self.n_eq,1),dtype=np.int_)
     for r in range(self.n_eq):
         self.bigK[r] = self.bigX[r].shape[1]
     self.bigXX,self.bigXy = sur_crossprod(self.bigX,self.bigy)
     # OLS regression by equation, sets up initial residuals
     self.sur_ols() # creates self.bOLS and self.olsE
     # SUR estimation using OLS residuals - two step estimation
     self.bSUR,self.varb,self.sig = sur_est(self.bigXX,self.bigXy,self.olsE,self.bigK)
     resids = sur_resids(self.bigy,self.bigX,self.bSUR)  # matrix of residuals
     # Sigma and log det(Sigma) for null model
     self.sig_ols = self.sig
     sols = np.diag(np.diag(self.sig))
     self.ldetS0 = np.log(np.diag(sols)).sum()
     det0 = self.ldetS0
     # setup for iteration
     det1 = la.slogdet(self.sig)[1]
     self.ldetS1 = det1
     #self.niter = 0
     if iter:    # iterated FGLS aka ML
         n_iter = 0
         while np.abs(det1-det0) > epsilon and n_iter <= maxiter:
             n_iter += 1
             det0 = det1
             self.bSUR,self.varb,self.sig = sur_est(self.bigXX,self.bigXy,\
                       resids,self.bigK)
             resids = sur_resids(self.bigy,self.bigX,self.bSUR)
             det1 = la.slogdet(self.sig)[1]
             if verbose:
                 print (n_iter,det0,det1)
         self.bigE = sur_resids(self.bigy,self.bigX,self.bSUR)
         self.ldetS1 = det1
         self.niter = n_iter
     else:
         self.niter = 1
         self.bigE = resids
     self.corr = sur_corr(self.sig)
     lik = self.n_eq * (1.0 + np.log(2.0*np.pi)) + self.ldetS1
     self.llik = - (self.n / 2.0) * lik
Example #37
    def test_loglikelihood(self):
        gsm = GSM(3, 1)

        samples = gsm.sample(100000)

        # compute entropy analytically
        entropy = 0.5 * slogdet(2. * pi * e * gsm.covariance / gsm.scales)[1]

        # compare with estimated entropy
        self.assertAlmostEqual(entropy, -mean(gsm.loglikelihood(samples)), 1)
Example #38
    def expected_log_likelihood(self, data):
        W = inv(self.psi)
        N = data.shape[1]
        x = data - self.m

        return -sum(x * dot(W, x), 0)[None, :] * self.nu / 2 \
         - self.dim / 2. / self.s \
         + sum(psi((self.nu + 1. - arange(1, self.dim + 1)) / 2.)) / 2. \
         + slogdet(W)[1] / 2. \
         - self.dim / 2. * log(pi)
Example #39
    def model_evidence(self):
        self.ensure_gram_matrix()
        t = self._train_t

        datafit = t.T.dot(self._Cinvt)
        s, logdet = slogdet(self._C)
        complexity = s * logdet
        normalization = len(t) * np.log(np.pi * 2)

        return -0.5 * (datafit + complexity + normalization)
Example #40
 def log_prior(self, i):
     """Return the probability of `X[i]` under the prior alone."""
     mu = self.prior.m_0
     covar = (self.prior.k_0 +
              1) / (self.prior.k_0 *
                    (self.prior.v_0 - self.D + 1)) * self.prior.S_0
     logdet_covar = slogdet(covar)[1]
     inv_covar = inv(covar)
     v = self.prior.v_0 - self.D + 1
     return self._multivariate_students_t(i, mu, logdet_covar, inv_covar, v)
Example #41
    def KL(self, othr):
        j, k = self._ci(othr)
        k = min(k, self.Ncov // 2, othr.Ncov // 2)
        delta = self.MomAct[j:k] - othr.MomAct[j:k]

        ChSelf = cho_factor(self.Cov[j:k, j:k])
        h = cho_solve(ChSelf, delta)
        r = cho_solve(ChSelf, othr.Cov[j:k, j:k])

        return (np.trace(r) + delta.dot(h) - slogdet(r)[1] - k + j) / 2
Example #42
 def _logdetH(self):
     """
     log(|H|) for H = s⁻¹XᵀQD⁻¹QᵀX.
     """
     if not self._restricted:
         return 0.0
     ldet = slogdet(self._tXTBtX / self.scale)
     if ldet[0] != 1.0:
         raise ValueError("The determinant of H should be positive.")
     return ldet[1]
Example #43
def mwdp_fusion(prior, cov_prior, meas, cov_meas):
    """
    Fuse ellipses A and B in original state space using representation with highest likelihood; assumes independent
    measurement dimensions for the switch of covariance elements
    :param prior:       Prior ellipse in original state space
    :param cov_prior:   Covariance of prior ellipse
    :param meas:        Measured ellipse in original state space
    :param cov_meas:    Covariance of measured ellipse
    :return:            Mean and covariance of fusion with highest likelihood representation and number of 90 degree
                        shifts of ellipse B to get highest likelihood representation
    """
    res_orig_alt_rots = np.zeros((4, 5))
    res_orig_alt_rots_cov = np.zeros((4, 5, 5))
    res_orig_log_lik = np.zeros(4)
    innov = np.zeros((4, 5))
    meas_alt = np.zeros(5)
    meas_alt[M] = meas[M]

    # test all 4 representations
    for k in range(4):
        # shift orientation and if necessary switch semi-axis in mean and orientation
        meas_alt[AL] = (meas[AL] + k * np.pi * 0.5) % (2 * np.pi)
        if k % 2 != 0:
            meas_alt[L] = meas[W]
            meas_alt[W] = meas[L]
            cov_meas_alt = np.copy(cov_meas)
            cov_meas_alt[3, 3] = cov_meas[4, 4]
            cov_meas_alt[4, 4] = cov_meas[3, 3]
        else:
            meas_alt[L] = meas[L]
            meas_alt[W] = meas[W]
            cov_meas_alt = np.copy(cov_meas)

        # Kalman update
        S_orig_alt = cov_prior + cov_meas_alt
        if np.linalg.det(S_orig_alt) == 0:
            print('Singular S_orig')
            # print(S_orig)
            continue
        K_orig_alt = np.dot(cov_prior, np.linalg.inv(S_orig_alt))
        innov[k] = meas_alt - prior
        # use shorter angle difference
        innov[k, 2] = ((innov[k, 2] + np.pi) % (2 * np.pi)) - np.pi
        res_orig_alt_rots[k] = prior + np.dot(K_orig_alt, innov[k])
        res_orig_alt_rots_cov[k] = cov_prior - np.dot(
            np.dot(K_orig_alt, S_orig_alt), K_orig_alt.T)

        # calculate log-likelihood
        res_orig_log_lik[k] = -0.5 * np.dot(np.dot(innov[k], inv(S_orig_alt)),
                                            innov[k])
        sign, logdet_inv = slogdet(inv(S_orig_alt))
        res_orig_log_lik[k] += 0.5 * logdet_inv - 2.5 * np.log(2 * np.pi)

    return res_orig_alt_rots[np.argmax(res_orig_log_lik)], res_orig_alt_rots_cov[np.argmax(res_orig_log_lik)],\
           np.argmax(res_orig_log_lik)
Example #44
def maxdiv_gaussian_globalcov(X,
                              intervals,
                              mode='I_OMEGA',
                              gaussian_mode='GLOBAL_COV',
                              **kwargs):
    """ Scores given intervals by assuming gaussian distributions with equal covariance.
    
    `X` is a d-by-n matrix with `n` data points, each with `d` attributes.
    
    `intervals` has to be an iterable of `(a, b, score)` tuples, which define an
    interval `[a,b)` which is suspected to be an anomaly.
    
    Returns: a list of `(a, b, score)` tuples. `a` and `b` are the same as in the given
             `intervals` iterable, but the scores will indicate whether a given interval
             is an anomaly or not.
    """

    dimension, n = X.shape
    numValidSamples = n if not np.ma.isMaskedArray(X) else X[0, :].count()

    X_integral = np.cumsum(X if not np.ma.isMaskedArray(X) else X.filled(0),
                           axis=1)
    sums_all = X_integral[:, -1]
    if (gaussian_mode == 'GLOBAL_COV') and (dimension > 1):
        cov = np.ma.cov(X).filled(0)
        cov_chol = cho_factor(cov)
        logdet = slogdet(cov)[1]

    scores = []

    eps = 1e-7
    for a, b, base_score in intervals:

        extreme_interval_length = b - a if not np.ma.isMaskedArray(X) else X[
            0, a:b].count()
        non_extreme_points = numValidSamples - extreme_interval_length

        sums_extreme = X_integral[:, b - 1] - (X_integral[:, a -
                                                          1] if a > 0 else 0)
        sums_non_extreme = sums_all - sums_extreme
        sums_extreme /= extreme_interval_length
        sums_non_extreme /= non_extreme_points

        diff = sums_extreme - sums_non_extreme
        if (gaussian_mode == 'GLOBAL_COV') and (dimension > 1):
            score = diff.T.dot(cho_solve(cov_chol, diff))
            if (mode == 'CROSSENT') or (mode == 'CROSSENT_TS'):
                score += logdet
        else:
            score = np.sum(diff * diff)
        if (mode == 'CROSSENT') or (mode == 'CROSSENT_TS'):
            score += dimension * (1 + np.log(2 * np.pi))
        scores.append((a, b, score))

    return scores
Example #45
    def initialize(self):
        """
        Initialize the gibbs sampler state.

        I start with log N tables and randomly initialize customers to those tables.

        """
        # First check the prior degrees of freedom.
        # It has to be >= num_dimension
        if self.prior.nu < self.embedding_size:
            self.log.warn(
                "The initial degrees of freedom of the prior is less than the dimension!. "
                "Setting it to the number of dimensions: {}".format(
                    self.embedding_size))
            self.prior.nu = self.embedding_size

        deg_of_freedom = self.prior.nu - self.embedding_size + 1
        # Now calculate the covariance matrix of the multivariate T-distribution
        coeff = (self.prior.kappa + 1.) / (self.prior.kappa * deg_of_freedom)
        sigma_T = self.prior.sigma * coeff
        # This features in the original code, but doesn't get used
        # Or is it just to check that the invert doesn't fail?
        #sigma_Tinv = inv(sigma_T)
        sigma_TDet_sign, sigma_TDet = slogdet(sigma_T)
        if sigma_TDet_sign != 1:
            raise ValueError(
                "sign of log determinant of initial sigma is {}".format(
                    sigma_TDet_sign))

        # Storing zeros in sumTableCustomers and later will keep on adding each customer.
        self.sum_squared_table_customers[:] = 0
        # Means are set to the prior and then updated as we add each assignment
        self.table_means.np[:] = self.prior.mu

        # Initialize the cholesky decomp of each table, with no counts yet
        for table in range(self.num_tables):
            self.table_cholesky_ltriangular_mat.np[
                table] = self.prior.chol_sigma.copy()

        # Randomly assign customers to tables
        self.table_assignments = []
        pbar = get_progress_bar(len(self.corpus),
                                title="Initializing",
                                show_progress=self.show_progress)
        for doc_num, doc in enumerate(pbar(self.corpus)):
            tables = list(np.random.randint(self.num_tables, size=len(doc)))
            self.table_assignments.append(tables)
            for (word, table) in zip(doc, tables):
                self.table_counts.np[table] += 1
                self.table_counts_per_doc[table, doc_num] += 1
                # update the sumTableCustomers
                self.sum_squared_table_customers[table] += np.outer(
                    self.vocab_embeddings[word], self.vocab_embeddings[word])

                self.update_table_params(table, word)
Example #46
 def findCost(self, y, t):
     # Cost function is tc'y - log det(-sum... + I)
     res = t * np.dot(self.C, y)
     A = np.zeros((self.COUNT, self.COUNT),dtype=np.complex128)
     for i in range(len(y)):
         A -= y[i] * self.F_MATRICES[i]
     A += np.identity(self.COUNT)
     # Find log determinant, there is an alternative calculation
     (sign, logdet) = slogdet(A)
     res -= sign*logdet
     return res
Example #47
def log_gaussian_pdf(x_i, mu_c, sigma_c):
    """
    Computes log N(x_i | mu_c, sigma_c)
    """
    n = len(mu_c)
    a = n * np.log(2 * np.pi)
    _, b = slogdet(sigma_c)

    y = np.linalg.solve(sigma_c, x_i - mu_c)
    c = np.dot(x_i - mu_c, y)
    return -0.5 * (a + b + c)
Example #48
def draw_beta_full_cov(k, s, w, size=1):
    """
    draw beta from posterior (depends on k, s, w), eq 9 (Rasmussen 2000), using ARS
    the covariance matrix of the model is full cov
    Make it robust with an expanding range in case of failure
    """
    D = w.shape[0]

    # compute the log determinant of w, log(det(w))
    logdet_w = slogdet(w)[1]
    # compute the cumulative sum over j = 1..k of [ log(det(sj)) - trace(w * sj) ]
    cumculative_sum_equation = 0
    for sj in s:
        sj = np.reshape(sj, (D, D))
        cumculative_sum_equation += slogdet(sj)[1]
        cumculative_sum_equation -= np.trace(np.dot(w, sj))
    lb = D
    ars = ARS(log_p_beta_full_cov, log_p_beta_prime_full_cov, xi=[lb + 1, lb + 1000], lb=lb, ub=float("inf"), \
              k=k, s=s, w=w, D=D, logdet_w=logdet_w, cumculative_sum_equation=cumculative_sum_equation)
    return ars.draw(size)
Example #49
    def numeric(self, values):
        """Returns the logdet of PSD matrix A.

        For PSD matrix A, this is the sum of logs of eigenvalues of A
        and is equivalent to the nuclear norm of the matrix logarithm of A.
        """
        sign, logdet = LA.slogdet(values[0])
        if sign == 1:
            return logdet
        else:
            return -np.inf
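A hedged check of the docstring's claim that, for a PSD matrix, the log-determinant is the sum of the logs of its eigenvalues (the test matrix is arbitrary).

import numpy as np
from numpy.linalg import slogdet, eigvalsh

rng = np.random.default_rng(3)
B = rng.normal(size=(4, 4))
A = B @ B.T + 4 * np.eye(4)              # symmetric positive definite
sign, logdet = slogdet(A)
assert sign == 1.0
assert np.isclose(logdet, np.log(eigvalsh(A)).sum())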
Example #50
    def test_sparse_inv_covariance(self, q, alpha_ratio, figname):
        # minimize -log(det(S)) + trace(S*Q) + \alpha*||S||_1 subject to S is symmetric PSD.

        # Problem data.
        # q: Dimension of matrix.
        p = 1000  # Number of samples.
        ratio = 0.9  # Fraction of zeros in S.

        S_true = sparse.csc_matrix(make_sparse_spd_matrix(q, ratio))
        Sigma = sparse.linalg.inv(S_true).todense()
        z_sample = np.real(sp.linalg.sqrtm(Sigma)).dot(np.random.randn(
            q, p))  # make sure it's real matrices.
        Q = np.cov(z_sample)
        print('Q is positive definite? {}'.format(bool(LA.slogdet(Q)[0])))

        mask = np.ones(Q.shape, dtype=bool)
        np.fill_diagonal(mask, 0)
        alpha_max = np.max(np.abs(Q)[mask])
        alpha = alpha_ratio * alpha_max  # 0.001 for q = 100, 0.01 for q = 50.

        # Convert problem to standard form.
        # f_1(S_1) = -log(det(S_1)) + trace(S_1*Q) on symmetric PSD matrices, f_2(S_2) = \alpha*||S_2||_1.
        # A_1 = I, A_2 = -I, b = 0.
        prox_list = [
            lambda v, t: prox_neg_log_det(
                v.reshape(
                    (q, q), order='C'), t, lin_term=t * Q).ravel(order='C'),
            lambda v, t: prox_norm1(v, t * alpha)
        ]
        A_list = [sparse.eye(q * q), -sparse.eye(q * q)]
        b = np.zeros(q * q)

        # Solve with DRS.
        drs_result = a2dr(prox_list,
                          A_list,
                          b,
                          anderson=False,
                          precond=True,
                          max_iter=self.MAX_ITER)
        print('Finished DRS.')

        # Solve with A2DR.
        a2dr_result = a2dr(prox_list,
                           A_list,
                           b,
                           anderson=True,
                           precond=True,
                           max_iter=self.MAX_ITER)
        # lam_accel = 0 seems to work well sometimes, although it oscillates a lot.
        a2dr_S = a2dr_result["x_vals"][-1].reshape((q, q), order='C')
        self.compare_total(drs_result, a2dr_result, figname)
        print('Finished A2DR.')
        print('recovered sparsity = {}'.format(
            np.sum(a2dr_S != 0) * 1.0 / a2dr_S.shape[0]**2))
Example #51
    def _logdetXX(self):
        """
        log(|XᵀX|).
        """
        if not self._restricted:
            return 0.0

        ldet = slogdet(self._Xsvd.US.T @ self._Xsvd.US)
        if ldet[0] != 1.0:
            raise ValueError("The determinant of XᵀX should be positive.")
        return ldet[1]
Example #52
    def __init__(self, means, covs, weights=None):
        if weights is None:
            weights = np.ones(len(means), dtype=float)

        assert len(means) == len(weights) and len(means) == len(covs)

        self.K = len(means)
        self.means = [np.array(m) for m in means]
        self.icovs = [linalg.inv(c) for c in covs]
        self.logws = [np.log(0.5 * w / np.pi) - 0.5 * linalg.slogdet(c)[1]
                            for w, c in zip(weights / np.sum(weights), covs)]
Example #53
def log_pdf(x, mu, T, n):
    [k, N] = x.shape
    xm = x - mu
    (sign, logdet) = slogdet(T)
    logc = math.lgamma((n + k) / 2.0)
    logc += 0.5 * logdet
    logc -= math.lgamma(n / 2.0)
    logc -= np.log(n * math.pi) * (k / 2.0)
    logp = -0.5 * (n + k) * np.log(1 + xm.T * T * xm / n)
    logp = logp + logc
    return float(logp)
Example #54
    def log_marg_k(self, k):
        """
        Return the log marginal probability of the data vectors assigned to
        component `k`.

        The log marginal probability p(X) = p(x_1, x_2, ..., x_N) is returned
        for the data vectors assigned to component `k`. See (266) in Murphy's
        bayesGauss notes, p. 21.
        """
        k_N = self.prior.k_0 + self.counts[k]
        v_N = self.prior.v_0 + self.counts[k]
        m_N = self.m_N_numerators[k] / k_N
        S_N = self.S_N_partials[k] - k_N * np.outer(m_N, m_N)
        i = np.arange(1, self.D + 1, dtype=int)
        return (-self.counts[k] * self.D / 2. * self._cached_log_pi +
                self.D / 2. * math.log(self.prior.k_0) -
                self.D / 2. * math.log(k_N) +
                self.prior.v_0 / 2. * slogdet(self.prior.S_0)[1] -
                v_N / 2. * slogdet(S_N)[1] +
                np.sum(self._cached_gammaln_by_2[v_N + 1 - i] -
                       self._cached_gammaln_by_2[self.prior.v_0 + 1 - i]))
Example #55
 def scan_energies(self, k, Earray, m=range(-6, 7)):
     self.bsize = len(m)
     self.m = m
     sign = []
     logdet = []
     for E in Earray:
         H, S = self.fill_arrays(k, E)
         #            val = det(H - E * S)
         val = slogdet(H - E * S)
         sign.append(val[0])
         logdet.append(val[1])
     return array(sign), array(logdet)
Example #56
    def value(self):
        mean = self._mean.value()
        cov = self._cov.value()
        ym = self._y - mean
        Kiym = solve(cov, ym)

        (s, logdet) = slogdet(cov)
        if not s == 1.0:
            raise RuntimeError("This determinant should not be negative.")

        n = len(self._y)
        return -(logdet + ym.dot(Kiym) + n * log(2 * pi)) / 2
Example #57
def get_logdet(m):
    from numpy.linalg import slogdet
    logdet = slogdet(m)

    if logdet[0] == -1:  # pragma: no cover
        raise ValueError("Matrix is not positive definite")
    elif logdet[0] == 0:  # pragma: no cover
        raise ValueError("Matrix is singular")
    else:
        logdet = logdet[1]

    return logdet
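A minimal usage sketch for get_logdet above, covering the three sign cases it distinguishes (inputs are arbitrary).

import numpy as np

print(get_logdet(2.0 * np.eye(3)))       # positive definite: 3 * log(2)
try:
    get_logdet(np.zeros((3, 3)))         # sign == 0
except ValueError as err:
    print(err)                           # Matrix is singular
try:
    get_logdet(-np.eye(3))               # sign == -1
except ValueError as err:
    print(err)                           # Matrix is not positive definite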
Example #58
    def test_affine_preconditioner_logjacobian(self):
        meanIn = randn(5, 1)
        meanOut = randn(2, 1)
        preIn = randn(5, 5)
        preOut = randn(2, 2)
        predictor = randn(2, 5)

        pre = AffinePreconditioner(meanIn, meanOut, preIn, preOut, predictor)

        self.assertAlmostEqual(
            mean(pre.logjacobian(randn(5, 10), randn(2, 10))),
            slogdet(preOut)[1])
Example #59
def stnll(x, m, a, c, B, D):
    '''
    Compute Student-t negative log likelihood (Appendix A, eqn. (20))
    '''
    mu = m
    nu = a-D+1
    Lambda = c*float(nu)/(c+1)*B
    S = np.dot(np.dot((x-mu).T, Lambda), (x-mu))
    _, logdetL = slogdet(Lambda)
    return float(nu+D)/2.*np.log(1.+S/float(nu))\
        - 0.5*logdetL+gammaln(nu/2.)\
        - gammaln((float(nu)+D)/2.)+D/2.*np.log(float(nu)*np.pi)
Example #60
 def __init__(self, data, label, alpha=1e-6):
     GaussianClassifier.__init__(self, data, label, alpha)
     self.log_determinants = np.ndarray(shape=self.num_classes,
                                         dtype=float)
     for i in range(self.num_classes):
         s, logdet = la.slogdet(self.variances[i])
         if s == 0:
             raise Exception("singular matrix")
         self.log_determinants[i] = logdet
     self.precisions = np.array(
         [la.inv(self.variances[i]) for i in range(self.num_classes)])
     del self.variances