Example #1
 def step(self, x, last_b):
     # initialize
     m = len(x)
     mu = np.matrix(last_b).T
     sigma = self.sigma
     theta = self.theta
     eps = self.eps
     x = np.matrix(x).T    # matrices are easier to manipulate
     
     # 4. Calculate the following variables
     M = mu.T * x
     V = x.T * sigma * x
     x_upper = sum(diag(sigma) * x) / trace(sigma)  
     
     # 5. Update the portfolio distribution
     mu, sigma = self.update(x, x_upper, mu, sigma, M, V, theta, eps)
     
     # 6. Normalize mu and sigma
     mu = tools.simplex_proj(mu)
     sigma = sigma / (m**2 * trace(sigma))
     """
     sigma(sigma < 1e-4*eye(m)) = 1e-4;
     """
     self.sigma = sigma
     return mu
Example #2
def fid(target_unitary, error_channel_operators, density_matrix, symbolic=1):
	"""Fidelity between a unitary gate and a non-necessarily unitary gate,
	for a given initial density matrix. This is later used when calculating
	the worst case fidelity.
	Notice that the input format of the general channel is a list of Kraus
	operators instead of a process matrix.  The input format of the target
	unitary is just the matrix itself, not its process matrix.
	symbolic = 1 is the case when the input matrices are sympy,
	while symbolic = 0 is used when the input matrices are numpy.
	"""
	V, K, rho = target_unitary, error_channel_operators, density_matrix
	if symbolic:	
		Tra = (((V.H)*K[0])*rho).trace()
		fid = Tra*(fun.conjugate(Tra))
		for i in range(1,len(K)):
			Tra = (((V.H)*K[i])*rho).trace()
			fid += Tra*(fun.conjugate(Tra))
		return fid.expand()
	else:
		Tra = np.trace((V.H)*K[0]*rho)
		fid = Tra*(Tra.conjugate())
		for i in range(1,len(K)):
			Tra = np.trace((V.H)*K[i]*rho)
			fid += Tra*(Tra.conjugate())
		return fid
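A minimal numeric sketch of the same quantity for the non-symbolic branch, F = sum_i |Tr(V^dagger K_i rho)|^2, written with plain NumPy arrays (conj().T in place of the .H attribute used above); the gate, Kraus operators and state below are made up for illustration:

import numpy as np

V = np.eye(2, dtype=complex)                      # target gate: identity
p = 0.01                                          # illustrative error strength
K = [np.sqrt(1 - p) * np.eye(2, dtype=complex),   # Kraus operators of a bit-flip channel
     np.sqrt(p) * np.array([[0, 1], [1, 0]], dtype=complex)]
rho = np.array([[1, 0], [0, 0]], dtype=complex)   # initial density matrix |0><0|

F = 0.0
for Ki in K:
    tra = np.trace(V.conj().T @ Ki @ rho)         # Tr(V^dagger K_i rho)
    F += (tra * tra.conjugate()).real
print(F)                                          # equals 1 - p for this choice of rho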
Example #3
    def test_mapping_cost(
        self,
        other,
        bend_coef=DEFAULT_LAMBDA[1],
        outlierprior=1e-1,
        outlierfrac=1e-2,
        outliercutoff=1e-2,
        T=5e-3,
        norm_iters=DEFAULT_NORM_ITERS,
    ):
        mapping_err = self.mapping_cost(other, outlierprior, outlierfrac, outliercutoff, T, norm_iters)
        for i in range(self.N):
            ## compute error for 0 on cpu
            s_gpu = mapping_err[i]
            s_cpu = np.float32(0)
            xt = self.pts_t[i].get()
            xw = self.pts_w[i].get()

            yt = other.pts_t[i].get()
            yw = other.pts_w[i].get()

            ##use the trace b/c then numpy will use float32s all the way
            s_cpu += np.trace(xt.T.dot(xt) + xw.T.dot(xw) - 2 * xw.T.dot(xt))
            s_cpu += np.trace(yt.T.dot(yt) + yw.T.dot(yw) - 2 * yw.T.dot(yt))

            if not np.isclose(s_cpu, s_gpu, atol=1e-4):
                ## high err tolerance is b/c of difference in cpu and gpu precision?
                print "cpu and gpu sum sq differences differ!!!"
                ipy.embed()
                sys.exit(1)
Example #4
    def __update_tau(self, X):
        """
        Update b_tau_tilde, as a_tau_tilde is independent of other update rules

        b_tau_tilde = b_tau + 1/2 sum ( Z  )
        where Z =
        || X_n ||^2 + <|| mu ||^2> + Tr(<W.T * W> <z_n * z_n.T>) +
            2*<mu.T> * <W> * <z_n> - 2 * X_n.T * <W> * <z_n> - 2 * X_n.T * <mu>
        """
        x_norm_sq = np.power(np.linalg.norm(X, axis=0), 2)
        # <|mu|^2> = <mu.T mu> = Tr(Sigma_mu) + mean_mu.T mean_mu
        exp_mu_norm_sq = np.trace(self.sigma_mu) + np.dot(self.mean_mu.T, self.mean_mu)
        exp_mu_norm_sq = exp_mu_norm_sq[0] # reshape from (1,1) to (1,)

        # TODO what is <W.T W>
        exp_w = self.means_w
        exp_wt_w = np.dot(exp_w.T, exp_w) # TODO fix
        exp_z_zt = self.N * self.sigma_z + np.dot(self.means_z, self.means_z.T)

        trace_w_z = np.trace(np.dot(exp_wt_w, exp_z_zt))

        mu_w_z = np.dot(np.dot(self.mean_mu.T, self.means_w), self.means_z)

        x_w_z = np.dot(X.T, self.means_w).T * self.means_z

        x_mu = np.dot(X.T, self.mean_mu)

        big_sum = np.sum(x_norm_sq) + self.N * exp_mu_norm_sq + trace_w_z + \
                  2*np.sum(mu_w_z) - 2*np.sum(x_w_z) - 2*np.sum(x_mu)

        self.b_tau_tilde = self.b_tau + 0.5*big_sum
Example #5
	def confmat(self,inputs,targets):
		"""Confusion matrix"""

		# Add the bias node inputs (a column of -1s)
		inputs = np.concatenate((inputs,-np.ones((self.nData,1))),axis=1)
		
		outputs = np.dot(inputs,self.weights)
	
		nClasses = np.shape(targets)[1]

		if nClasses==1:
			nClasses = 2
			outputs = np.where(outputs>0,1,0)
		else:
			# 1-of-N encoding
			outputs = np.argmax(outputs,1)
			targets = np.argmax(targets,1)

		cm = np.zeros((nClasses,nClasses))
		for i in range(nClasses):
			for j in range(nClasses):
				cm[i,j] = np.sum(np.where(outputs==i,1,0)*np.where(targets==j,1,0))

		print(cm)
		print(np.trace(cm)/np.sum(cm))
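The accuracy printed at the end is trace(cm) / sum(cm). A self-contained sketch of that idea, outside the class above, with made-up label vectors:

import numpy as np

predicted = np.array([0, 2, 1, 1, 0, 2, 2])
actual    = np.array([0, 2, 1, 0, 0, 1, 2])
n_classes = 3

cm = np.zeros((n_classes, n_classes))
for i in range(n_classes):
    for j in range(n_classes):
        cm[i, j] = np.sum((predicted == i) & (actual == j))

print(cm)
print(np.trace(cm) / np.sum(cm))   # fraction of correct predictions, 5/7 here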
Example #6
def grad_log_like(phis, *args):
    x_train, t_train = args
    
    #init the matrices for the derivatives of each phi according to each pair of data points
    dert0 = np.zeros((x_train.shape[0], x_train.shape[0]))
    dert1 = np.zeros((x_train.shape[0], x_train.shape[0]))
    dert2 = np.zeros((x_train.shape[0], x_train.shape[0]))
    dert3 = np.zeros((x_train.shape[0], x_train.shape[0]))
    
    #vector of the final result of the derivatives
    der = np.zeros_like(phis)
    K = computeK_opt(x_train, x_train, phis)
    C = computeC(K, beta)
    invC = np.linalg.inv(C)
    for i in range(len(x_train)):
        for j in range(len(x_train)):
            dert0[i,j] = np.exp((-np.exp(phis[1])/2)*((x_train[i] - x_train[j])**2))*np.exp(phis[0])
            dert1[i,j] = -0.5*np.exp(phis[0])*np.exp((-np.exp(phis[1])/2)*((x_train[i] - x_train[j])**2))*((x_train[i] - x_train[j])**2)*np.exp(phis[1])
            dert2[i,j] = np.exp(phis[2])
            dert3[i,j] = x_train[i]*x_train[j]*np.exp(phis[3])
    
    # get the derivatives of the negative log-likelihood
    der[0] = -(((-1/2)*np.trace(np.dot(invC, dert0))) + ((1/2)*np.dot(np.dot(np.dot(np.dot(t_train.T, invC),dert0), invC),t_train)))
    der[1] = -(((-1/2)*np.trace(np.dot(invC, dert1))) + ((1/2)*np.dot(np.dot(np.dot(np.dot(t_train.T, invC),dert1), invC),t_train)))
    der[2] = -(((-1/2)*np.trace(np.dot(invC, dert2))) + ((1/2)*np.dot(np.dot(np.dot(np.dot(t_train.T, invC),dert2), invC),t_train)))
    der[3] = -(((-1/2)*np.trace(np.dot(invC, dert3))) + ((1/2)*np.dot(np.dot(np.dot(np.dot(t_train.T, invC),dert3), invC),t_train)))
    
    return der
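The trace terms above use the standard identity d/dtheta log|C(theta)| = trace(C^{-1} dC/dtheta). A small stand-alone finite-difference check of that identity (the matrices here are arbitrary and unrelated to computeK_opt or computeC):

import numpy as np

rng = np.random.default_rng(0)
A = rng.standard_normal((4, 4))
B = rng.standard_normal((4, 4))
B = B @ B.T                                       # symmetric direction dC/dtheta

def C(theta):
    return A @ A.T + 5.0 * np.eye(4) + theta * B  # positive definite near theta = 0

eps = 1e-6
fd = (np.linalg.slogdet(C(eps))[1] - np.linalg.slogdet(C(-eps))[1]) / (2 * eps)
analytic = np.trace(np.linalg.solve(C(0.0), B))   # trace(C^{-1} dC/dtheta)
print(fd, analytic)                               # the two values agree closely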
Example #7
    def _fit(self, cov_a, cov_b):
        """Aux Function (modifies cov_a and cov_b in-place)."""
        cov_a /= np.trace(cov_a)
        cov_b /= np.trace(cov_b)
        # computes the eigenvalues
        lambda_, u = linalg.eigh(cov_a + cov_b)
        # sort them
        ind = np.argsort(lambda_)[::-1]
        lambda2_ = lambda_[ind]

        u = u[:, ind]
        p = np.dot(np.sqrt(linalg.pinv(np.diag(lambda2_))), u.T)

        # Set up the generalized eigenvalue problem
        w_a = np.dot(np.dot(p, cov_a), p.T)
        w_b = np.dot(np.dot(p, cov_b), p.T)
        # and solve it
        vals, vecs = linalg.eigh(w_a, w_b)
        # sort vectors by discriminative power using the eigenvalues
        ind = np.argsort(np.maximum(vals, 1.0 / vals))[::-1]
        vecs = vecs[:, ind]
        # and project
        w = np.dot(vecs.T, p)

        self.filters_ = w
        self.patterns_ = linalg.pinv(w).T
Example #8
def m_step_Q(emd, stationary):
    """
    Computes the optimised state-transition covariance hyperparameters `Q' of
    the natural parameters of the posterior distributions over time. Here
    only a single scalar is considered.

    :param container.EMData emd:
        All data pertaining to the EM algorithm.
    :param stationary:
        If 'all', stationarity of all thetas is assumed.
    """
    inv_lmbda = 0
    if emd.param_est_eta == 'exact':
        for i in range(1, emd.T):
            lag_one_covariance = emd.sigma_s_lag[i, :, :]
            tmp = emd.theta_s[i, :] - emd.theta_s[i - 1, :]
            inv_lmbda += numpy.trace(emd.sigma_s[i, :, :]) - \
                         2 * numpy.trace(lag_one_covariance) + \
                         numpy.trace(emd.sigma_s[i - 1, :, :]) + \
                         numpy.dot(tmp, tmp)
        emd.Q = inv_lmbda / emd.D / (emd.T - 1) * numpy.identity(emd.D)
    else:
        for i in range(1, emd.T):
            lag_one_covariance = emd.sigma_s_lag[i, :]
            tmp = emd.theta_s[i, :] - emd.theta_s[i - 1, :]
            inv_lmbda += numpy.sum(emd.sigma_s[i]) - \
                         2 * numpy.sum(lag_one_covariance) + \
                         numpy.sum(emd.sigma_s[i - 1]) + \
                         numpy.dot(tmp, tmp)
        emd.Q = inv_lmbda / emd.D / (emd.T - 1) * \
                numpy.identity(emd.D)
    if stationary == 'all':
        emd.Q = numpy.zeros(emd.Q.shape)
Example #9
 def maxwell_sihvola(self,dielectric_medium,dielecv,shape,L,vf) :
     """Calculate the effective constant permittivity using the maxwell garnett method 
        dielectric_medium is the dielectric constant tensor of the medium
        dielecv is the total frequency dielectric constant tensor at the current frequency
        shape is the name of the current shape
        L is the shape's depolarisation matrix
        vf is the volume fraction of filler
        The routine returns the effective dielectric constant"""
     # Equation 6.29 on page 123 of Sihvola
     # Equation 6.40 gives the averaging over the orientation function
     # See also equation 5.80 on page 102 and equation 4.31 on page 70
     Me = dielectric_medium
     # assume the medium is isotropic and calculate the inverse of its dielectric constant
     Mem1 = 3.0 / np.trace(Me)
     Mi = dielecv
     # calculate the polarisability matrix times the number density of inclusions
     nA = vf*np.dot( (Mi-Me), np.linalg.inv( self.unit + (Mem1 * np.dot(L, (Mi - Me)))))
     nAL = np.dot((nA),L)
     # average the polarisability over orientation
     nA = np.trace(nA) / 3.0 * self.unit
     # average the polarisability*L over orientation
     nAL = np.trace(nAL) / 3.0 * Mem1 * self.unit
     # Calculate the average polarisation factor which scales the average field 
     # based on equation 5.80
     # <P> = pol . <E>
     pol = np.dot(np.linalg.inv(self.unit - nAL), nA)
     # Meff . <E> = Me . <E> + <P>
     # Meff . <E> = Me. <E> + pol . <E>
     # Meff = Me + pol
     effd         = dielectric_medium + pol
     # Average over orientation
     trace = np.trace(effd) / 3.0 
     effdielec = np.array ( [ [trace, 0, 0], [0,trace,0], [0,0,trace] ] )
     return effdielec 
Example #10
    def test_pullback(self):
        (D,P,N) = 2,5,10
        A_data = numpy.zeros((D,P,N,N))
        for d in range(D):
            for p in range(P):
                tmp = numpy.random.rand(N,N)
                A_data[d,p,:,:] = numpy.dot(tmp.T,tmp)

                if d == 0:
                    A_data[d,p,:,:] += N * numpy.diag(numpy.random.rand(N))

        A = UTPM(A_data)
        l,Q = UTPM.eigh(A)

        L_data = UTPM._diag(l.data)
        L = UTPM(L_data)

        assert_array_almost_equal(UTPM.dot(Q, UTPM.dot(L,Q.T)).data, A.data, decimal = 13)

        lbar = UTPM(numpy.random.rand(*(D,P,N)))
        Qbar = UTPM(numpy.random.rand(*(D,P,N,N)))

        Abar = UTPM.pb_eigh( lbar, Qbar, A, l, Q)

        Abar = Abar.data[0,0]
        Adot = A.data[1,0]

        Lbar = UTPM._diag(lbar.data)[0,0]
        Ldot = UTPM._diag(l.data)[1,0]

        Qbar = Qbar.data[0,0]
        Qdot = Q.data[1,0]

        assert_almost_equal(numpy.trace(numpy.dot(Abar.T, Adot)), numpy.trace( numpy.dot(Lbar.T, Ldot) + numpy.dot(Qbar.T, Qdot)))
Example #11
    def test_pullback_repeated_eigenvalues(self):
        D,P,N = 2,1,6
        A = UTPM(numpy.zeros((D,P,N,N)))
        V = UTPM(numpy.random.rand(D,P,N,N))

        A.data[0,0] = numpy.diag([2,2,3,3.,4,5])
        A.data[1,0] = numpy.diag([5,1,3,1.,1,3])

        V,Rtilde = UTPM.qr(V)
        A = UTPM.dot(UTPM.dot(V.T, A), V)

        l,Q = UTPM.eigh(A)

        L_data = UTPM._diag(l.data)
        L = UTPM(L_data)

        assert_array_almost_equal(UTPM.dot(Q, UTPM.dot(L,Q.T)).data, A.data, decimal = 13)

        lbar = UTPM(numpy.random.rand(*(D,P,N)))
        Qbar = UTPM(numpy.random.rand(*(D,P,N,N)))

        Abar = UTPM.pb_eigh( lbar, Qbar, A, l, Q)

        Abar = Abar.data[0,0]
        Adot = A.data[1,0]

        Lbar = UTPM._diag(lbar.data)[0,0]
        Ldot = UTPM._diag(l.data)[1,0]

        Qbar = Qbar.data[0,0]
        Qdot = Q.data[1,0]

        assert_almost_equal(numpy.trace(numpy.dot(Abar.T, Adot)), numpy.trace( numpy.dot(Lbar.T, Ldot) + numpy.dot(Qbar.T, Qdot)))
Example #12
    def test_eigh1_pushforward(self):
        (D,P,N) = 2,1,2
        A = UTPM(numpy.zeros((D,P,N,N)))
        A.data[0,0] = numpy.eye(N)
        A.data[1,0] = numpy.diag([3,4])

        L,Q,b = UTPM.eigh1(A)

        assert_array_almost_equal(UTPM.dot(Q, UTPM.dot(L,Q.T)).data, A.data, decimal = 13)

        Lbar = UTPM.diag(UTPM(numpy.zeros((D,P,N))))
        Lbar.data[0,0] = [0.5,0.5]
        Qbar = UTPM(numpy.random.rand(*(D,P,N,N)))

        Abar = UTPM.pb_eigh1( Lbar, Qbar, None, A, L, Q, b)

        Abar = Abar.data[0,0]
        Adot = A.data[1,0]

        Lbar = Lbar.data[0,0]
        Ldot = L.data[1,0]

        Qbar = Qbar.data[0,0]
        Qdot = Q.data[1,0]

        assert_almost_equal(numpy.trace(numpy.dot(Abar.T, Adot)), numpy.trace( numpy.dot(Lbar.T, Ldot) + numpy.dot(Qbar.T, Qdot)))
Example #13
 def test_det_ovlp(self):
     mf = scf.UHF(mol)
     mf.scf()
     s, x = mf.det_ovlp(mf.mo_coeff, mf.mo_coeff, mf.mo_occ, mf.mo_occ)
     self.assertAlmostEqual(s, 1.000000000, 9)
     self.assertAlmostEqual(numpy.trace(x[0]), mf.nelec[0]*1.000000000, 9)
     self.assertAlmostEqual(numpy.trace(x[1]), mf.nelec[1]*1.000000000, 9)
Example #14
        def grad_nlogprob(hypers):
            amp2  = np.exp(hypers[0])
            noise = np.exp(hypers[1])
            ls    = np.exp(hypers[2:])

            chol, corr, grad_corr = memoize(amp2, noise, ls)
            solve   = spla.cho_solve((chol, True), diffs)
            inv_cov = spla.cho_solve((chol, True), np.eye(chol.shape[0]))

            jacobian = np.outer(solve, solve) - inv_cov

            grad = np.zeros(self.D + 2)

            # Log amplitude gradient.
            grad[0] = 0.5 * np.trace(np.dot( jacobian, corr + 1e-6*np.eye(chol.shape[0]))) * amp2

            # Log noise gradient.
            grad[1] = 0.5 * np.trace(np.dot( jacobian, np.eye(chol.shape[0]))) * noise

            # Log length scale gradients.
            for dd in range(self.D):
                grad[dd+2] = 1 * np.trace(np.dot( jacobian, -amp2*grad_corr[:,:,dd]*comp[:,dd][:,np.newaxis]/(np.exp(ls[dd]))))*np.exp(ls[dd])

            # Roll in the prior variance.
            #grad -= 2*hypers/self.hyper_prior

            return -grad
Example #15
  def __calcMergeCost(self, weightA, meanA, precA, weightB, meanB, precB):
    """Calculates and returns the cost of merging two Gaussians."""
    # (We compare the two components against each other, rather than against the result of merging them, because this tends to give better results.)

    # The log determinants and delta...
    logDetA = math.log(numpy.linalg.det(precA))
    logDetB = math.log(numpy.linalg.det(precB))
    delta = meanA - meanB

    # Kullback-Leibler of representing A using B...
    klA = logDetB - logDetA
    klA += numpy.trace(numpy.dot(precB, numpy.linalg.inv(precA)))
    klA += numpy.dot(numpy.dot(delta, precB), delta)
    klA -= precA.shape[0]
    klA *= 0.5

    # Kullback-Leibler of representing B using A...
    klB = logDetA - logDetB
    klB += numpy.trace(numpy.dot(precA, numpy.linalg.inv(precB)))
    klB += numpy.dot(numpy.dot(delta, precA), delta)
    klB -= precB.shape[0]
    klB *= 0.5

    # Return a weighted average...
    return weightA * klA + weightB * klB
Example #16
 def _brug_minimise_scalar(self,variables,eps1,eps2,shape,L,f1) :
     # unpack the complex number from the variables
     # two things going on here. 
     # 1. the two variables refer to the real and imaginary components
     # 2. we require the imaginary component to be positive
     trace = complex(variables[0],np.exp(variables[1])-1.0)
     epsbr = np.array ( [ [trace, 0, 0], [0,trace,0], [0,0,trace] ] )
     f2 = 1.0 - f1
     b1 = np.dot(L,(eps1 - epsbr))
     b2 = np.dot(L,(eps2 - epsbr))
     tb1 = np.trace(b1)/3.0
     tb2 = np.trace(b2)/3.0
     ta1 = 1.0/ ( 1.0 + tb1 )
     ta2 = 1.0/ ( 1.0 + tb2 )
     c1 = eps1-epsbr
     c2 = eps2-epsbr
     tc1 = np.trace(c1)/3.0
     tc2 = np.trace(c2)/3.0
     # alpha1 and 2 are the polarisabilities of 1 and 2 in the effective medium 
     talpha1 = tc1 * ta1
     talpha2 = tc2 * ta2
     error = f1*talpha1 + f2*talpha2
     error = np.abs(error.conjugate() * error)
     # Nasty issue in the Powell method: convergence on tol is judged relative to
     # the solution, which here would be 0.0, so we shift the solution by 1.0 and
     # the tolerance is then effectively relative to 1.0.
     return 1.0+error
Example #17
def SolveResponse(HamDMET, NelecActiveSpace, orb_i, omega, eta, toSolve, GSenergy, GSvector, printoutput=False):

    GFvalue, Re2RDMresponse, Im2RDMresponse, A_2RDMresponse = SolveResponseBASE(
        HamDMET, NelecActiveSpace, orb_i, omega, eta, toSolve, GSenergy, GSvector, printoutput
    )

    # Calculate the 1RDMs from the 2RDMs
    RDM_A = np.einsum("ikjk->ij", A_2RDMresponse)
    RDM_R = np.einsum("ikjk->ij", Re2RDMresponse)
    RDM_I = np.einsum("ikjk->ij", Im2RDMresponse)

    if (toSolve == "F") or (toSolve == "B"):
        elecNum = NelecActiveSpace
    if toSolve == "A":
        elecNum = NelecActiveSpace + 1
    if toSolve == "R":
        elecNum = NelecActiveSpace - 1

    # Now 1RDM for response as if calculated from normalized wave function
    norm_A = np.trace(RDM_A) / elecNum
    norm_R = np.trace(RDM_R) / elecNum
    norm_I = np.trace(RDM_I) / elecNum
    RDM_A = RDM_A / norm_A
    RDM_R = RDM_R / norm_R
    RDM_I = RDM_I / norm_I

    return (GFvalue, RDM_A, RDM_R, RDM_I, norm_A, norm_R, norm_I)
Example #18
    def _prepare_data(self, k_point=None):
        """
        Sets all necessary fields for 1D calculations. Sorts atom indices to improve parallelism.
        :returns: number of atoms, sorted atom indices
        """
        # load powder data for one k
        clerk = AbinsModules.IOmodule(input_filename=self._input_filename,
                                      group_name=AbinsModules.AbinsParameters.powder_data_group)
        powder_data = clerk.load(list_of_datasets=["powder_data"])
        self._a_tensors = powder_data["datasets"]["powder_data"]["a_tensors"][k_point]
        self._b_tensors = powder_data["datasets"]["powder_data"]["b_tensors"][k_point]
        self._a_traces = np.trace(a=self._a_tensors, axis1=1, axis2=2)
        self._b_traces = np.trace(a=self._b_tensors, axis1=2, axis2=3)

        # load dft data for one k point
        clerk = AbinsModules.IOmodule(input_filename=self._input_filename,
                                      group_name=AbinsModules.AbinsParameters.ab_initio_group)
        dft_data = clerk.load(list_of_datasets=["frequencies", "weights"])

        frequencies = dft_data["datasets"]["frequencies"][int(k_point)]
        indx = frequencies > AbinsModules.AbinsConstants.ACOUSTIC_PHONON_THRESHOLD
        self._fundamentals_freq = frequencies[indx]

        self._weight = dft_data["datasets"]["weights"][int(k_point)]

        # free memory
        gc.collect()
Example #19
 def compute_energy(self):
     """
     Compute the rhf energy
     :return: energy
     """
     for i in range(self.maxiter):
         D0 = np.trace(self.D)
         h = self.T + self.V
         j = np.einsum('mrns,rs',self.g,self.D)
         k = np.einsum('msrn,rs',self.g,self.D)
         v = j-.5*k
         f = h + v
         ft = np.dot(self.X , np.dot(f, self.X))
         e , Ct = la.eigh(ft)
         C = np.dot(self.X,Ct)
         OC = C[:,:self.ndocc]
         self.D = 2*np.dot(OC, OC.T)
         T = h + .5*v
         E = np.dot(T,self.D)
         energy = np.trace(E) + self.V_nuc 
         
         if abs(D0 - np.trace(self.D)) < self.e_convergence:
             break
     self.energy = energy
     print('Final RHF Energy:')
     print(self.energy)
     return(energy)
Example #20
def stein_estimator(cov, precision, nsim=1, nbin=1, biased_precision=True):
    """ Stein estimator
        
    Parameters
    ----------
    cov: numpy array
        covariance
    precision: numpy array
        inverse covariance
    nsim: int
        number of simulations (default 1)
    nbin: int
        number of bins (default 1)
    biased_precision: bool
        use Hartlap correction for inverse covariance (default True)
        
    Returns
    ------
    numpy array
        Stein estimator
    """
    
    if (biased_precision):
        stein = (nsim-nbin-2.)/(nsim-1.)*precision + (nbin*(nbin+1)-2.)/((nsim-1.)*np.trace(cov))*np.eye(nbin)
    else:
        stein = precision + (nbin*(nbin+1)-2.)/((nsim-1.)*np.trace(cov))*np.eye(nbin)
    return stein
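An illustrative call of stein_estimator as defined above (assuming it is in scope); the simulated data below is made up:

import numpy as np

rng = np.random.default_rng(1)
nsim, nbin = 200, 5
X = rng.standard_normal((nsim, nbin))     # simulated data vectors
cov = np.cov(X.T)                         # sample covariance, shape (nbin, nbin)
precision = np.linalg.inv(cov)

stein = stein_estimator(cov, precision, nsim=nsim, nbin=nbin)
print(stein.shape)                        # (5, 5)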
Example #21
def test_basic_batch_equality():
    """
    Test basic batch equality specification.
    """
    dims = [4, 8]
    for dim in dims:
        block_dim = int(dim/2)
        # Generate random configurations
        A = np.random.rand(block_dim, block_dim)
        B = np.random.rand(block_dim, block_dim)
        B = np.dot(B.T, B)
        D = np.random.rand(block_dim, block_dim)
        D = np.dot(D.T, D)
        tr_B_D = np.trace(B) + np.trace(D)
        B = B / tr_B_D
        D = D / tr_B_D
        As, bs, Cs, ds, Fs, gradFs, Gs, gradGs = \
                basic_batch_equality(dim, A, B, D)
        tol = 1e-3
        eps = 1e-4
        N_rand = 10
        for (g, gradg) in zip(Gs, gradGs):
            for i in range(N_rand):
                X = np.random.rand(dim, dim)
                val = g(X)
                grad = gradg(X)
                print "grad:\n", grad
                num_grad = numerical_derivative(g, X, eps)
                print "num_grad:\n", num_grad
                assert np.sum(np.abs(grad - num_grad)) < tol
Example #22
    def _update_derived(self):
        """Update derived values from self._mean and self._cov"""

        self.position = self._mean[0:3]
        self.velocity = self._mean[3:6]

        pe = numpy.trace(self._cov[0:3, 0:3])
        self.position_error = 1e6 if pe < 0 else math.sqrt(pe)
        ve = numpy.trace(self._cov[3:6, 3:6])
        self.velocity_error = 1e6 if ve < 0 else math.sqrt(ve)

        lat, lon, alt = self.position_llh = geodesy.ecef2llh(self.position)

        # rotate velocity into the local tangent plane
        lat_r = lat * constants.DTOR
        lon_r = lon * constants.DTOR
        C = numpy.array([[-math.sin(lon_r), math.cos(lon_r), 0],
                         [math.sin(-lat_r) * math.cos(lon_r), math.sin(-lat_r) * math.sin(lon_r), math.cos(-lat_r)],
                         [math.cos(-lat_r) * math.cos(lon_r), math.cos(-lat_r) * math.sin(lon_r), -math.sin(-lat_r)]])
        east, north, up = self.velocity_enu = numpy.dot(C, self.velocity.T).T

        # extract speeds, headings
        self.heading = math.atan2(east, north) * 180.0 / math.pi
        if self.heading < 0:
            self.heading += 360
        self.ground_speed = math.sqrt(north**2 + east**2)
        self.vertical_speed = up

        self.valid = True
Example #23
def CCA(X, Y, eps=1.e-15):
    """
    Canonical correlation analysis of two matrices
    
    Parameters
    ----------
    X array of shape (nbitem,p) 
    Y array of shape (nbitem,q) 
    eps=1.e-15, float, a small biasing constant
                to ensure invertibility of the matrices
    
    Returns
    -------
    ccs, array of shape (min(n, p, q),): the canonical correlations
        
    Note
    ----
    It is expected that nbitem>>max(p,q)
    """
    from numpy.linalg import cholesky, inv, svd
    if Y.shape[0]!=X.shape[0]:
        raise ValueError,"Incompatible dimensions for X and Y"
    p = X.shape[1]
    q = Y.shape[1]
    sqX = np.dot(X.T,X)
    sqY = np.dot(Y.T,Y)
    sqX += np.trace(sqX)*eps*np.eye(p)
    sqY += np.trace(sqY)*eps*np.eye(q)
    rsqX = cholesky(sqX)
    rsqY = cholesky(sqY)
    iX = inv(rsqX).T
    iY = inv(rsqY).T
    Cxy = np.dot(np.dot(X,iX).T,np.dot(Y,iY))
    uv, ccs, vv = svd(Cxy)
    return ccs
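A quick check of CCA as defined above (assuming it is in scope): two noisy views of the same latent signal should give a leading canonical correlation near 1; the data below is synthetic:

import numpy as np

rng = np.random.default_rng(0)
n = 1000
latent = rng.standard_normal((n, 1))
X = np.hstack([latent + 0.1 * rng.standard_normal((n, 1)),
               rng.standard_normal((n, 2))])   # shape (n, 3)
Y = np.hstack([latent + 0.1 * rng.standard_normal((n, 1)),
               rng.standard_normal((n, 2))])   # shape (n, 3)
print(CCA(X, Y))   # first canonical correlation close to 1, the others near 0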
Example #24
    def decide_by_history(self, x, last_b):
        x = self.get_last_rpv(x)
        x = np.reshape(x, (1,x.size))
        last_b = np.reshape(last_b, (1,last_b.size))
        if self.sigma is None:
            self.init_portfolio(x)
        # initialize
        m = len(x)
        mu = np.matrix(last_b).T
        sigma = self.sigma
        theta = self.theta
        eps = self.eps
        x = np.matrix(x).T    # matrices are easier to manipulate

        # 4. Calculate the following variables
        M = (mu.T * x).mean()
        V = x.T * sigma * x
        x_upper = sum(diag(sigma) * x) / trace(sigma)

        # 5. Update the portfolio distribution
        mu, sigma = self.update(x, x_upper, mu, sigma, M, V, theta, eps)

        # 6. Normalize mu and sigma
        mu = self.simplex_proj(mu)
        sigma = sigma / (m**2 * trace(sigma))
        """
        sigma(sigma < 1e-4*eye(m)) = 1e-4;
        """
        self.sigma = sigma

        return np.ravel(mu)
Example #25
 def log_likelihood_X(self, X=None, Z=None, A=None):
     if A is None:
         A = self._A;
     if Z is None:
         Z = self._Z;
     if X is None:
         X = self._X;
         
     assert(X.shape[0] == Z.shape[0]);
     (N, D) = X.shape;
     (N, K) = Z.shape;
     assert(A.shape == (K, D));
     
     log_likelihood = X - numpy.dot(Z, A);
     
     (row, column) = log_likelihood.shape;
     if row > column:
         log_likelihood = numpy.trace(numpy.dot(log_likelihood.transpose(), log_likelihood));
     else:
         log_likelihood = numpy.trace(numpy.dot(log_likelihood, log_likelihood.transpose()));
     
     log_likelihood = -0.5 * log_likelihood / numpy.power(self._sigma_x, 2);
     log_likelihood -= N * D * 0.5 * numpy.log(2 * numpy.pi * numpy.power(self._sigma_x, 2));
                    
     return log_likelihood
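The branch on rows versus columns works because both trace(E.T E) and trace(E E.T) equal the squared Frobenius norm of the residual E; the code simply forms whichever product is smaller. A tiny stand-alone check:

import numpy as np

rng = np.random.default_rng(0)
E = rng.standard_normal((100, 3))        # tall residual matrix, like X - Z A above
a = np.trace(np.dot(E.T, E))             # 3 x 3 product
b = np.trace(np.dot(E, E.T))             # 100 x 100 product
c = np.sum(E ** 2)
print(a, b, c)                           # all three agree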
Example #26
def rao_blackwell_ledoit_wolf(S, n):
    """Rao-Blackwellized Ledoit-Wolf shrinkaged estimator of the covariance
    matrix.

    Parameters
    ----------
    S : array, shape=(p, p)
        Sample covariance matrix (e.g. estimated with np.cov(X.T))
    n : int
        Number of data points.

    Returns
    -------
    sigma : array, shape=(p, p)
    shrinkage : float

    References
    ----------
    .. [1] Chen, Yilun, Ami Wiesel, and Alfred O. Hero III. "Shrinkage
        estimation of high dimensional covariance matrices" ICASSP (2009)
    """
    p = len(S)
    assert S.shape == (p, p)

    alpha = (n-2)/(n*(n+2))
    beta = ((p+1)*n - 2) / (n*(n+2))

    trace_S2 = np.sum(S*S)  # np.trace(S.dot(S))
    U = ((p * trace_S2 / np.trace(S)**2) - 1)
    rho = min(alpha + beta/U, 1)

    F = (np.trace(S) / p) * np.eye(p)
    return (1-rho)*S + rho*F, rho
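An illustrative call of rao_blackwell_ledoit_wolf as defined above (assuming it is in scope); with few samples relative to the dimension, the shrinkage weight grows:

import numpy as np

rng = np.random.default_rng(0)
n, p = 30, 10
X = rng.standard_normal((n, p))
S = np.cov(X.T)                           # sample covariance, shape (p, p)
sigma, shrinkage = rao_blackwell_ledoit_wolf(S, n)
print(shrinkage)                          # shrinkage weight between 0 and 1
print(np.allclose(sigma, sigma.T))        # the shrunk estimate stays symmetric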
Example #27
File: pca.py Project: molmod/yaff
def pca_similarity(covar_a, covar_b):
    """
        Calculates the similarity between the two covariance matrices

        **Arguments:**

        covar_a
            The first covariance matrix.

        covar_b
            The second covariance matrix.
    """
    # Take the square root of the symmetric matrices
    a_sq = spla.sqrtm(covar_a)
    b_sq = spla.sqrtm(covar_b)

    # Check for imaginary entries
    for mat in [a_sq, b_sq]:
        max_imag = np.amax(np.abs(np.imag(mat)))
        mean_real = np.mean(np.abs(np.real(mat)))
        if(max_imag/mean_real > 1e-6):
            Warning('Covariance matrix is not diagonally dominant')

    # Return the PCA similarity (1 - PCA distance)
    return 1 - np.sqrt(np.trace(np.dot(a_sq-b_sq, a_sq-b_sq))/(np.trace(covar_a+covar_b)))
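An illustrative call of pca_similarity as defined above (assuming the function and its imports, np and spla, are in scope): identical covariance matrices give similarity 1, a perturbed copy gives slightly less:

import numpy as np
import scipy.linalg as spla

rng = np.random.default_rng(0)
A = rng.standard_normal((5, 5))
covar_a = A @ A.T + np.eye(5)             # symmetric positive definite
covar_b = covar_a + 0.1 * np.eye(5)       # slightly perturbed copy
print(pca_similarity(covar_a, covar_a))   # 1.0
print(pca_similarity(covar_a, covar_b))   # a little below 1.0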
Example #28
    def negentropy(self, E_x=None, E_xxT=None, E_mu=None, E_mumuT=None, E_Sigma_inv=None, E_logdet_Sigma=None):
        """
        Compute the negative entropy of the Gaussian distribution
        :return: E[ln p(x | mu, Sigma)] = -0.5*log(2*pi) - 0.5*E[log |Sigma|] - 0.5*E[(x-mu)^T Sigma^{-1} (x-mu)]
        """
        if E_x is None:
            E_x = self.expected_x()

        if E_xxT is None:
            E_xxT = self.expected_xxT()

        if E_mu is None:
            E_mu = self.mu

        if E_mumuT is None:
            E_mumuT = np.outer(self.mu, self.mu)

        if E_Sigma_inv is None:
            E_Sigma_inv = np.linalg.inv(self.Sigma)

        if E_logdet_Sigma is None:
            E_logdet_Sigma = np.linalg.slogdet(self.Sigma)[1]

        H  = -0.5 * np.log(2*np.pi)
        H += -0.5 * E_logdet_Sigma
        # TODO: Replace trace with something more efficient
        H += -0.5 * np.trace(E_Sigma_inv.dot(E_xxT))
        H += E_x.T.dot(E_Sigma_inv).dot(E_mu)
        H += -0.5 * np.trace(E_Sigma_inv.dot(E_mumuT))
        return H
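As a cross-check on the expectation assembled above: for x ~ N(mu, Sigma) the quadratic term collapses via the trace identity E[(x-mu)^T Sigma^{-1} (x-mu)] = trace(Sigma^{-1} Cov(x)) = D, so with the standard D-dimensional normalization constant E[ln p(x)] equals minus the differential entropy. A small stand-alone sketch of that fact (independent of the class above):

import numpy as np
from scipy.stats import multivariate_normal

rng = np.random.default_rng(0)
D = 3
A = rng.standard_normal((D, D))
Sigma = A @ A.T + np.eye(D)               # a positive definite covariance
mu = rng.standard_normal(D)

expected_logp = -0.5 * D * np.log(2 * np.pi) \
                - 0.5 * np.linalg.slogdet(Sigma)[1] - 0.5 * D
print(expected_logp, -multivariate_normal(mu, Sigma).entropy())   # the two agree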
Example #29
def determine_winner(game):
  '''
     Determines the winner of the game passed as argument.
     Returns a Player model object or None.
     Each player's moves are recorded with a distinct token value, and every row, column and diagonal is summed. If a sum matches a player's winning total, that player is the winner.
  '''
  winner = None
  p1_token = 1
  p2_token = 9
  p1_win_sum = p1_token * 3
  p2_win_sum = p2_token * 3
  
  matrix = generate_matrix(game, p1_token, p2_token)
  sums = []
  #diagonals
  sums.append(np.trace(matrix))
  sums.append(np.trace(np.rot90(matrix)))
  #columns
  sums += np.sum(matrix, axis=0).ravel().tolist()
  #rows
  sums += np.sum(matrix, axis=1).ravel().tolist()
  
  if p1_win_sum in sums:
    return game.player_1
  elif p2_win_sum in sums:
    return game.player_2
  else:
    return None
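A tiny board check in the same spirit (illustrative, independent of the game and Player models used above): np.trace reads the main diagonal and np.trace(np.rot90(board)) the other one:

import numpy as np

board = np.array([[1, 9, 9],
                  [0, 1, 0],
                  [9, 0, 1]])             # player 1 token = 1, player 2 token = 9
sums = [np.trace(board), np.trace(np.rot90(board))]
sums += np.sum(board, axis=0).tolist()    # column sums
sums += np.sum(board, axis=1).tolist()    # row sums
print(3 * 1 in sums)                      # True: player 1 holds the main diagonal
print(3 * 9 in sums)                      # False: player 2 has no completed line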
Example #30
def w2distance2D(mu1, sig1, mu2, sig2):
	"""

	Returns the Wasserstein distance between two 2-Dimensional normal distributions

	"""
	t1 = np.linalg.norm(mu1 - mu2)

	#print t1
	t1 = t1 ** 2.0
	#print t1
	t2 = np.trace(sig2) + np.trace(sig1) 
	p1 = np.trace(np.dot(sig1, sig2))
	p2 =  (((np.linalg.det(np.dot(sig1, sig2)))))
	if p2 < 0.0:
		p2 = 0.0
	p2 = np.sqrt(p2)
	tt = p1 + 2.0*p2
	if tt < 0.0:
		tt = 0.0
	t3 = 2.0 * np.sqrt(tt)
	#print t3
	if (t1 + t2 - t3) < 0:
		result = 0.0
		#print "here"
	else:
		result = np.sqrt(t1 + t2 - t3)

	return result
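An illustrative call of w2distance2D as defined above (assuming it is in scope): for two Gaussians with identical covariances, the covariance terms cancel and the distance reduces to the separation of the means:

import numpy as np

mu1 = np.array([0.0, 0.0])
mu2 = np.array([3.0, 4.0])
sig = np.array([[1.0, 0.0], [0.0, 1.0]])
print(w2distance2D(mu1, sig, mu2, sig))   # 5.0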