Example #1
def w2distance2D(mu1, sig1, mu2, sig2):
	"""

	Returns the Wasserstein distance between two 2-Dimensional normal distributions

	"""
	t1 = np.linalg.norm(mu1 - mu2)

	#print t1
	t1 = t1 ** 2.0
	#print t1
	t2 = np.trace(sig2) + np.trace(sig1) 
	p1 = np.trace(np.dot(sig1, sig2))
	p2 = np.linalg.det(np.dot(sig1, sig2))
	if p2 < 0.0:
		p2 = 0.0
	p2 = np.sqrt(p2)
	tt = p1 + 2.0*p2
	if tt < 0.0:
		tt = 0.0
	t3 = 2.0 * np.sqrt(tt)
	#print t3
	if (t1 + t2 - t3) < 0:
		result = 0.0
		#print "here"
	else:
		result = np.sqrt(t1 + t2 - t3)

	return result
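A minimal usage sketch (not from the original source; it assumes numpy is imported as np). With identity covariances the Bures term cancels, so the distance reduces to the Euclidean distance between the means:

import numpy as np

mu1, mu2 = np.array([0.0, 0.0]), np.array([3.0, 4.0])
sig1 = sig2 = np.eye(2)
print(w2distance2D(mu1, sig1, mu2, sig2))  # 5.0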
Example #2
def determine_winner(game):
  '''
     Determines the winner of the game passed as argument.
     Returns a Player model object or None.
     Different values per player are used in turn positions and summed.  If the expected sum for victory is found, the winner is the corresponding player.
  '''
  winner = None
  p1_token = 1
  p2_token = 9
  p1_win_sum = p1_token * 3
  p2_win_sum = p2_token * 3
  
  matrix = generate_matrix(game, p1_token, p2_token)
  sums = []
  #diagonals
  sums.append(np.trace(matrix))
  sums.append(np.trace(np.rot90(matrix)))
  #columns
  sums += np.sum(matrix, axis=0).ravel().tolist()
  #rows
  sums += np.sum(matrix, axis=1).ravel().tolist()
  
  if p1_win_sum in sums:
    return game.player_1
  elif p2_win_sum in sums:
    return game.player_2
  else:
    return None
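The two np.trace calls cover both winning diagonals: np.trace(matrix) sums the main diagonal, while np.trace(np.rot90(matrix)) sums the anti-diagonal, since a 90-degree rotation maps one onto the other. A quick illustrative check (numpy assumed; the board values are made up):

import numpy as np

m = np.array([[1, 1, 9],
              [1, 9, 1],
              [9, 1, 1]])
print(np.trace(m))            # 11, main diagonal: no win
print(np.trace(np.rot90(m)))  # 27 = 9*3, anti-diagonal: player 2 wins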
Example #3
File: pca.py Project: molmod/yaff
def pca_similarity(covar_a, covar_b):
    """
        Calculates the similarity between the two covariance matrices

        **Arguments:**

        covar_a
            The first covariance matrix.

        covar_b
            The second covariance matrix.
    """
    # Take the square root of the symmetric matrices
    a_sq = spla.sqrtm(covar_a)
    b_sq = spla.sqrtm(covar_b)

    # Check for imaginary entries
    import warnings
    for mat in [a_sq, b_sq]:
        max_imag = np.amax(np.abs(np.imag(mat)))
        mean_real = np.mean(np.abs(np.real(mat)))
        if max_imag / mean_real > 1e-6:
            # A bare Warning(...) constructs an exception and discards it; emit it properly.
            warnings.warn('Covariance matrix is not diagonally dominant')

    # Return the PCA similarity (1 - PCA distance)
    return 1 - np.sqrt(np.trace(np.dot(a_sq-b_sq, a_sq-b_sq))/(np.trace(covar_a+covar_b)))
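A hedged usage sketch (assumes numpy as np and scipy.linalg as spla, matching the snippet's abbreviations): identical covariance matrices give a PCA distance of zero and hence a similarity of exactly 1.

import numpy as np
import scipy.linalg as spla

rng = np.random.default_rng(0)
m = rng.standard_normal((4, 4))
covar = m @ m.T + 4.0 * np.eye(4)    # symmetric positive definite
print(pca_similarity(covar, covar))  # 1.0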
Example #4
 def step(self, x, last_b):
     # initialize
     m = len(x)
     mu = np.matrix(last_b).T
     sigma = self.sigma
     theta = self.theta
     eps = self.eps
     x = np.matrix(x).T    # matrices are easier to manipulate
     
     # 4. Calculate the following variables
     M = mu.T * x
     V = x.T * sigma * x
     x_upper = sum(diag(sigma) * x) / trace(sigma)  
     
     # 5. Update the portfolio distribution
     mu, sigma = self.update(x, x_upper, mu, sigma, M, V, theta, eps)
     
     # 6. Normalize mu and sigma
     mu = tools.simplex_proj(mu)
     sigma = sigma / (m**2 * trace(sigma))
     """
     sigma(sigma < 1e-4*eye(m)) = 1e-4;
     """
     self.sigma = sigma
     return mu
Example #5
File: batchtps.py Project: rll/lfd
    def test_mapping_cost(
        self,
        other,
        bend_coef=DEFAULT_LAMBDA[1],
        outlierprior=1e-1,
        outlierfrac=1e-2,
        outliercutoff=1e-2,
        T=5e-3,
        norm_iters=DEFAULT_NORM_ITERS,
    ):
        mapping_err = self.mapping_cost(other, outlierprior, outlierfrac, outliercutoff, T, norm_iters)
        for i in range(self.N):
            ## compute error for 0 on cpu
            s_gpu = mapping_err[i]
            s_cpu = np.float32(0)
            xt = self.pts_t[i].get()
            xw = self.pts_w[i].get()

            yt = other.pts_t[i].get()
            yw = other.pts_w[i].get()

            ##use the trace b/c then numpy will use float32s all the way
            s_cpu += np.trace(xt.T.dot(xt) + xw.T.dot(xw) - 2 * xw.T.dot(xt))
            s_cpu += np.trace(yt.T.dot(yt) + yw.T.dot(yw) - 2 * yw.T.dot(yt))

            if not np.isclose(s_cpu, s_gpu, atol=1e-4):
                ## high err tolerance is b/c of difference in cpu and gpu precision?
                print("cpu and gpu sum sq differences differ!!!")
                ipy.embed()
                sys.exit(1)
Example #6
 def log_likelihood_X(self, X=None, Z=None, A=None):
     if A is None:
         A = self._A
     if Z is None:
         Z = self._Z
     if X is None:
         X = self._X

     assert X.shape[0] == Z.shape[0]
     (N, D) = X.shape
     (N, K) = Z.shape
     assert A.shape == (K, D)

     log_likelihood = X - numpy.dot(Z, A)

     (row, column) = log_likelihood.shape
     if row > column:
         log_likelihood = numpy.trace(numpy.dot(log_likelihood.transpose(), log_likelihood))
     else:
         log_likelihood = numpy.trace(numpy.dot(log_likelihood, log_likelihood.transpose()))

     log_likelihood = -0.5 * log_likelihood / numpy.power(self._sigma_x, 2)
     log_likelihood -= N * D * 0.5 * numpy.log(2 * numpy.pi * numpy.power(self._sigma_x, 2))
                    
     return log_likelihood
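The row/column comparison exploits the identity trace(E.T @ E) = trace(E @ E.T) = ||E||_F**2 and forms whichever product is the smaller square matrix. A quick standalone check (numpy assumed):

import numpy

E = numpy.arange(6.0).reshape(2, 3)
print(numpy.trace(numpy.dot(E.T, E)))      # 55.0
print(numpy.trace(numpy.dot(E, E.T)))      # 55.0
print(numpy.linalg.norm(E, 'fro') ** 2)    # 55.0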
Example #7
def fid(target_unitary, error_channel_operators, density_matrix, symbolic=1):
	"""Fidelity between a unitary gate and a non-necessarily unitary gate,
	for a given initial density matrix. This is later used when calculating
	the worst case fidelity.
	Notice that the input format of the general channel is a list of Kraus
	operators instead of a process matrix.  The input format of the target
	unitary is just the matrix itself, not its process matrix.
	symbolic = 1 is the case when the input matrices are sympy,
	while symbolic = 0 is used when the input matrices are numpy.
	"""
	V, K, rho = target_unitary, error_channel_operators, density_matrix
	if symbolic:	
		Tra = (((V.H)*K[0])*rho).trace()
		fid = Tra*(fun.conjugate(Tra))
		for i in range(1,len(K)):
			Tra = (((V.H)*K[i])*rho).trace()
			fid += Tra*(fun.conjugate(Tra))
		return fid.expand()
	else:
		Tra = np.trace((V.H)*K[0]*rho)
		fid = Tra*(Tra.conjugate())
		for i in range(1,len(K)):
			Tra = np.trace((V.H)*K[i]*rho)
			fid += Tra*(Tra.conjugate())
		return fid
Example #8
def CCA(X, Y, eps=1.e-15):
    """
    Canonical correlation analysis of two matrices
    
    Parameters
    ----------
    X array of shape (nbitem,p) 
    Y array of shape (nbitem,q) 
    eps=1.e-15, float is a small biasing constant
                to grant invertibility of the matrices
    
    Returns
    -------
    ccs : array of shape (min(nbitem, p, q),), the canonical correlations
        
    Note
    ----
    It is expected that nbitem>>max(p,q)
    """
    from numpy.linalg import cholesky, inv, svd
    if Y.shape[0] != X.shape[0]:
        raise ValueError("Incompatible dimensions for X and Y")
    p = X.shape[1]
    q = Y.shape[1]
    sqX = np.dot(X.T,X)
    sqY = np.dot(Y.T,Y)
    sqX += np.trace(sqX)*eps*np.eye(p)
    sqY += np.trace(sqY)*eps*np.eye(q)
    rsqX = cholesky(sqX)
    rsqY = cholesky(sqY)
    iX = inv(rsqX).T
    iY = inv(rsqY).T
    Cxy = np.dot(np.dot(X,iX).T,np.dot(Y,iY))
    uv, ccs, vv = svd(Cxy)
    return ccs
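A quick sanity check (numpy assumed; data synthetic): when Y is a nearly noiseless linear image of X, both canonical correlations approach 1.

import numpy as np

rng = np.random.default_rng(0)
X = rng.standard_normal((500, 3))
Y = X @ rng.standard_normal((3, 2)) + 0.01 * rng.standard_normal((500, 2))
print(CCA(X, Y))  # both values close to 1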
Example #9
	def confmat(self,inputs,targets):
		"""Confusion matrix"""

		# Add the inputs that match the bias node
		inputs = np.concatenate((inputs,-np.ones((self.nData,1))),axis=1)
		
		outputs = np.dot(inputs,self.weights)
	
		nClasses = np.shape(targets)[1]

		if nClasses==1:
			nClasses = 2
			outputs = np.where(outputs>0,1,0)
		else:
			# 1-of-N encoding
			outputs = np.argmax(outputs,1)
			targets = np.argmax(targets,1)

		cm = np.zeros((nClasses,nClasses))
		for i in range(nClasses):
			for j in range(nClasses):
				cm[i,j] = np.sum(np.where(outputs==i,1,0)*np.where(targets==j,1,0))

		print(cm)
		print(np.trace(cm)/np.sum(cm))
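The last line is the overall accuracy: for any confusion matrix, the trace (the correctly classified counts on the diagonal) divided by the grand total is the fraction of correct predictions. For instance (numpy assumed):

import numpy as np

cm = np.array([[50, 2],
               [3, 45]])
print(np.trace(cm) / np.sum(cm))  # 0.95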
Example #10
def test_basic_batch_equality():
    """
    Test basic batch equality specification.
    """
    dims = [4, 8]
    for dim in dims:
        block_dim = int(dim/2)
        # Generate random configurations
        A = np.random.rand(block_dim, block_dim)
        B = np.random.rand(block_dim, block_dim)
        B = np.dot(B.T, B)
        D = np.random.rand(block_dim, block_dim)
        D = np.dot(D.T, D)
        tr_B_D = np.trace(B) + np.trace(D)
        B = B / tr_B_D
        D = D / tr_B_D
        As, bs, Cs, ds, Fs, gradFs, Gs, gradGs = \
                basic_batch_equality(dim, A, B, D)
        tol = 1e-3
        eps = 1e-4
        N_rand = 10
        for (g, gradg) in zip(Gs, gradGs):
            for i in range(N_rand):
                X = np.random.rand(dim, dim)
                val = g(X)
                grad = gradg(X)
                print("grad:\n", grad)
                num_grad = numerical_derivative(g, X, eps)
                print("num_grad:\n", num_grad)
                assert np.sum(np.abs(grad - num_grad)) < tol
Example #11
File: vpca.py Project: macabot/mlpm_lab
    def __update_tau(self, X):
        """
        Update b_tau_tilde, as a_tau_tilde is independent of other update rules

        b_tau_tilde = b_tau + 1/2 sum ( Z  )
        where Z =
        || X_n ||^2 + <|| mu ||^2> + Tr(<W.T * W> <z_n * z_n.T>) +
            2*<mu.T> * <W> * <z_n> - 2 * X_n.T * <W> * <z_n> - 2 * X_n.T * <mu>
        """
        x_norm_sq = np.power(np.linalg.norm(X, axis=0), 2)
        # <|mu|^2> = <mu.T mu> = Tr(Sigma_mu) + mean_mu.T mean_mu
        exp_mu_norm_sq = np.trace(self.sigma_mu) + np.dot(self.mean_mu.T, self.mean_mu)
        exp_mu_norm_sq = exp_mu_norm_sq[0] # reshape from (1,1) to (1,)

        # TODO what is <W.T W>
        exp_w = self.means_w
        exp_wt_w = np.dot(exp_w.T, exp_w) # TODO fix
        exp_z_zt = self.N * self.sigma_z + np.dot(self.means_z, self.means_z.T)

        trace_w_z = np.trace(np.dot(exp_wt_w, exp_z_zt))

        mu_w_z = np.dot(np.dot(self.mean_mu.T, self.means_w), self.means_z)

        x_w_z = np.dot(X.T, self.means_w).T * self.means_z

        x_mu = np.dot(X.T, self.mean_mu)

        big_sum = np.sum(x_norm_sq) + self.N * exp_mu_norm_sq + trace_w_z + \
                  2*np.sum(mu_w_z) - 2*np.sum(x_w_z) - 2*np.sum(x_mu)

        self.b_tau_tilde = self.b_tau + 0.5*big_sum
Example #12
 def compute_energy(self):
     """
     Compute the rhf energy
     :return: energy
     """
     for i in range(self.maxiter):
         D0 = np.trace(self.D)
         h = self.T + self.V
         j = np.einsum('mrns,rs',self.g,self.D)
         k = np.einsum('msrn,rs',self.g,self.D)
         v = j-.5*k
         f = h + v
         ft = np.dot(self.X , np.dot(f, self.X))
         e , Ct = la.eigh(ft)
         C = np.dot(self.X,Ct)
         OC = C[:,:self.ndocc]
         self.D = 2*np.dot(OC, OC.T)
         T = h + .5*v
         E = np.dot(T,self.D)
         energy = np.trace(E) + self.V_nuc 
         
         if abs(D0 - np.trace(self.D)) < self.e_convergence:
             break
     self.energy = energy
     print('Final RHF Energy:')
     print(self.energy)
     return(energy)
Example #13
File: csp.py Project: rajul/mne-python
    def _fit(self, cov_a, cov_b):
        """Aux Function (modifies cov_a and cov_b in-place)."""
        cov_a /= np.trace(cov_a)
        cov_b /= np.trace(cov_b)
        # computes the eigen values
        lambda_, u = linalg.eigh(cov_a + cov_b)
        # sort them
        ind = np.argsort(lambda_)[::-1]
        lambda2_ = lambda_[ind]

        u = u[:, ind]
        p = np.dot(np.sqrt(linalg.pinv(np.diag(lambda2_))), u.T)

        # Compute the generalized eigen value problem
        w_a = np.dot(np.dot(p, cov_a), p.T)
        w_b = np.dot(np.dot(p, cov_b), p.T)
        # and solve it
        vals, vecs = linalg.eigh(w_a, w_b)
        # sort vectors by discriminative power using eigen values
        ind = np.argsort(np.maximum(vals, 1.0 / vals))[::-1]
        vecs = vecs[:, ind]
        # and project
        w = np.dot(vecs.T, p)

        self.filters_ = w
        self.patterns_ = linalg.pinv(w).T
Example #14
def SolveResponse(HamDMET, NelecActiveSpace, orb_i, omega, eta, toSolve, GSenergy, GSvector, printoutput=False):

    GFvalue, Re2RDMresponse, Im2RDMresponse, A_2RDMresponse = SolveResponseBASE(
        HamDMET, NelecActiveSpace, orb_i, omega, eta, toSolve, GSenergy, GSvector, printoutput
    )

    # Calculate the 1RDMs from the 2RDMs
    RDM_A = np.einsum("ikjk->ij", A_2RDMresponse)
    RDM_R = np.einsum("ikjk->ij", Re2RDMresponse)
    RDM_I = np.einsum("ikjk->ij", Im2RDMresponse)

    if (toSolve == "F") or (toSolve == "B"):
        elecNum = NelecActiveSpace
    if toSolve == "A":
        elecNum = NelecActiveSpace + 1
    if toSolve == "R":
        elecNum = NelecActiveSpace - 1

    # Now 1RDM for response as if calculated from normalized wave function
    norm_A = np.trace(RDM_A) / elecNum
    norm_R = np.trace(RDM_R) / elecNum
    norm_I = np.trace(RDM_I) / elecNum
    RDM_A = RDM_A / norm_A
    RDM_R = RDM_R / norm_R
    RDM_I = RDM_I / norm_I

    return (GFvalue, RDM_A, RDM_R, RDM_I, norm_A, norm_R, norm_I)
Example #15
def grad_log_like(phis, *args):
    x_train, t_train = args
    
    #init the matrices for the derivatives of each phi according to each pair of data points
    dert0 = np.zeros((x_train.shape[0], x_train.shape[0]))
    dert1 = np.zeros((x_train.shape[0], x_train.shape[0]))
    dert2 = np.zeros((x_train.shape[0], x_train.shape[0]))
    dert3 = np.zeros((x_train.shape[0], x_train.shape[0]))
    
    #vector of the final result of the derivatives
    der = np.zeros_like(phis)
    K = computeK_opt(x_train, x_train, phis)
    C = computeC(K, beta)
    invC = np.linalg.inv(C)
    for i in range(len(x_train)):
        for j in range(len(x_train)):
            dert0[i,j] = np.exp((-np.exp(phis[1])/2)*((x_train[i] - x_train[j])**2))*np.exp(phis[0])
            dert1[i,j] = -0.5*np.exp(phis[0])*np.exp((-np.exp(phis[1])/2)*((x_train[i] - x_train[j])**2))*((x_train[i] - x_train[j])**2)*np.exp(phis[1])
            dert2[i,j] = np.exp(phis[2])
            dert3[i,j] = x_train[i]*x_train[j]*np.exp(phis[3])
    
    # get the derivatives of the negative log-likelihood
    der[0] = -(((-1/2)*np.trace(np.dot(invC, dert0))) + ((1/2)*np.dot(np.dot(np.dot(np.dot(t_train.T, invC),dert0), invC),t_train)))
    der[1] = -(((-1/2)*np.trace(np.dot(invC, dert1))) + ((1/2)*np.dot(np.dot(np.dot(np.dot(t_train.T, invC),dert1), invC),t_train)))
    der[2] = -(((-1/2)*np.trace(np.dot(invC, dert2))) + ((1/2)*np.dot(np.dot(np.dot(np.dot(t_train.T, invC),dert2), invC),t_train)))
    der[3] = -(((-1/2)*np.trace(np.dot(invC, dert3))) + ((1/2)*np.dot(np.dot(np.dot(np.dot(t_train.T, invC),dert3), invC),t_train)))
    
    return der
Example #16
    def negentropy(self, E_x=None, E_xxT=None, E_mu=None, E_mumuT=None, E_Sigma_inv=None, E_logdet_Sigma=None):
        """
        Compute the negative entropy of the Gaussian distribution
        :return: E[ln p(x | mu, Sigma)] = -0.5*log(2*pi) - 0.5*E[log|Sigma|] - 0.5*E[(x-mu)^T Sigma^{-1} (x-mu)]
        """
        if E_x is None:
            E_x = self.expected_x()

        if E_xxT is None:
            E_xxT = self.expected_xxT()

        if E_mu is None:
            E_mu = self.mu

        if E_mumuT is None:
            E_mumuT = np.outer(self.mu, self.mu)

        if E_Sigma_inv is None:
            E_Sigma_inv = np.linalg.inv(self.Sigma)

        if E_logdet_Sigma is None:
            E_logdet_Sigma = np.linalg.slogdet(self.Sigma)[1]

        H  = -0.5 * np.log(2*np.pi)
        H += -0.5 * E_logdet_Sigma
        # TODO: Replace trace with something more efficient
        H += -0.5 * np.trace(E_Sigma_inv.dot(E_xxT))
        H += E_x.T.dot(E_Sigma_inv).dot(E_mu)
        H += -0.5 * np.trace(E_Sigma_inv.dot(E_mumuT))
        return H
Example #17
  def __calcMergeCost(self, weightA, meanA, precA, weightB, meanB, precB):
    """Calculates and returns the cost of merging two Gaussians."""
    # (For anyone wondering about the fact we are comparing them against each other rather than against the result of merging them that is because this way tends to get better results.)

    # The log determinants and delta...
    logDetA = math.log(numpy.linalg.det(precA))
    logDetB = math.log(numpy.linalg.det(precB))
    delta = meanA - meanB

    # Kullback-Leibler of representing A using B...
    # log-det term of KL(A||B) is ln(det SigmaB / det SigmaA) = logDetA - logDetB in precision log-dets
    klA = logDetA - logDetB
    klA += numpy.trace(numpy.dot(precB, numpy.linalg.inv(precA)))
    klA += numpy.dot(numpy.dot(delta, precB), delta)
    klA -= precA.shape[0]
    klA *= 0.5

    # Kullback-Leibler of representing B using A...
    klB = logDetB - logDetA
    klB += numpy.trace(numpy.dot(precA, numpy.linalg.inv(precB)))
    klB += numpy.dot(numpy.dot(delta, precA), delta)
    klB -= precB.shape[0]
    klB *= 0.5

    # Return a weighted average...
    return weightA * klA + weightB * klB
Example #18
    def decide_by_history(self, x, last_b):
        x = self.get_last_rpv(x)
        x = np.reshape(x, (1,x.size))
        last_b = np.reshape(last_b, (1,last_b.size))
        if self.sigma is None:
            self.init_portfolio(x)
        # initialize
        m = len(x)
        mu = np.matrix(last_b).T
        sigma = self.sigma
        theta = self.theta
        eps = self.eps
        x = np.matrix(x).T    # matrices are easier to manipulate

        # 4. Calculate the following variables
        M = (mu.T * x).mean()
        V = x.T * sigma * x
        x_upper = sum(diag(sigma) * x) / trace(sigma)

        # 5. Update the portfolio distribution
        mu, sigma = self.update(x, x_upper, mu, sigma, M, V, theta, eps)

        # 6. Normalize mu and sigma
        mu = self.simplex_proj(mu)
        sigma = sigma / (m**2 * trace(sigma))
        """
        sigma(sigma < 1e-4*eye(m)) = 1e-4;
        """
        self.sigma = sigma

        return np.ravel(mu)
Example #19
def rao_blackwell_ledoit_wolf(S, n):
    """Rao-Blackwellized Ledoit-Wolf shrinkaged estimator of the covariance
    matrix.

    Parameters
    ----------
    S : array, shape=(n, n)
        Sample covariance matrix (e.g. estimated with np.cov(X.T))
    n : int
        Number of data points.

    Returns
    -------
    sigma : array, shape=(n, n)
    shrinkage : float

    References
    ----------
    .. [1] Chen, Yilun, Ami Wiesel, and Alfred O. Hero III. "Shrinkage
        estimation of high dimensional covariance matrices" ICASSP (2009)
    """
    p = len(S)
    assert S.shape == (p, p)

    alpha = (n-2)/(n*(n+2))
    beta = ((p+1)*n - 2) / (n*(n+2))

    trace_S2 = np.sum(S*S)  # np.trace(S.dot(S))
    U = ((p * trace_S2 / np.trace(S)**2) - 1)
    rho = min(alpha + beta/U, 1)

    F = (np.trace(S) / p) * np.eye(p)
    return (1-rho)*S + rho*F, rho
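A usage sketch (numpy assumed; data synthetic): estimate a sample covariance from few samples, then shrink it toward the scaled identity.

import numpy as np

rng = np.random.default_rng(0)
X = rng.standard_normal((20, 5))  # 20 samples in 5 dimensions
S = np.cov(X.T)
sigma, shrinkage = rao_blackwell_ledoit_wolf(S, n=20)
print(shrinkage)  # in (0, 1]; larger means a stronger pull toward (tr(S)/p) * I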
Example #20
def stein_estimator(cov, precision, nsim=1, nbin=1, biased_precision=True):
    """ Stein estimator
        
    Parameters
    ----------
    cov: numpy array
        covariance
    precision: numpy array
        inverse covariance
    nsim: int
        number of simulations (default 1)
    nbin: int
        number of bins (default 1)
    biased_precision: bool
        use Hartlap correction for inverse covariance (default True)
        
    Returns
    ------
    numpy array
        Stein estimator
    """
    
    if (biased_precision):
        stein = (nsim-nbin-2.)/(nsim-1.)*precision + (nbin*(nbin+1)-2.)/((nsim-1.)*np.trace(cov))*np.eye(nbin)
    else:
        stein = precision + (nbin*(nbin+1)-2.)/((nsim-1.)*np.trace(cov))*np.eye(nbin)
    return stein
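A usage sketch (numpy assumed; inputs synthetic):

import numpy as np

rng = np.random.default_rng(0)
nsim, nbin = 100, 4
cov = np.cov(rng.standard_normal((nsim, nbin)).T)
precision = np.linalg.inv(cov)
print(stein_estimator(cov, precision, nsim=nsim, nbin=nbin))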
Example #21
    def _update_derived(self):
        """Update derived values from self._mean and self._cov"""

        self.position = self._mean[0:3]
        self.velocity = self._mean[3:6]

        pe = numpy.trace(self._cov[0:3, 0:3])
        self.position_error = 1e6 if pe < 0 else math.sqrt(pe)
        ve = numpy.trace(self._cov[3:6, 3:6])
        self.velocity_error = 1e6 if ve < 0 else math.sqrt(ve)

        lat, lon, alt = self.position_llh = geodesy.ecef2llh(self.position)

        # rotate velocity into the local tangent plane
        lat_r = lat * constants.DTOR
        lon_r = lon * constants.DTOR
        C = numpy.array([[-math.sin(lon_r), math.cos(lon_r), 0],
                         [math.sin(-lat_r) * math.cos(lon_r), math.sin(-lat_r) * math.sin(lon_r), math.cos(-lat_r)],
                         [math.cos(-lat_r) * math.cos(lon_r), math.cos(-lat_r) * math.sin(lon_r), -math.sin(-lat_r)]])
        east, north, up = self.velocity_enu = numpy.dot(C, self.velocity.T).T

        # extract speeds, headings
        self.heading = math.atan2(east, north) * 180.0 / math.pi
        if self.heading < 0:
            self.heading += 360
        self.ground_speed = math.sqrt(north**2 + east**2)
        self.vertical_speed = up

        self.valid = True
Example #22
 def _brug_minimise_scalar(self,variables,eps1,eps2,shape,L,f1) :
     # unpack the complex number from the variables
     # two things going on here. 
     # 1. the two variables refer to the real and imaginary components
     # 2. we require the imaginary component to be positive
     trace = complex(variables[0],np.exp(variables[1])-1.0)
     epsbr = np.array ( [ [trace, 0, 0], [0,trace,0], [0,0,trace] ] )
     f2 = 1.0 - f1
     b1 = np.dot(L,(eps1 - epsbr))
     b2 = np.dot(L,(eps2 - epsbr))
     tb1 = np.trace(b1)/3.0
     tb2 = np.trace(b2)/3.0
     ta1 = 1.0/ ( 1.0 + tb1 )
     ta2 = 1.0/ ( 1.0 + tb2 )
     c1 = eps1-epsbr
     c2 = eps2-epsbr
     tc1 = np.trace(c1)/3.0
     tc2 = np.trace(c2)/3.0
     # alpha1 and 2 are the polarisabilities of 1 and 2 in the effective medium 
     talpha1 = tc1 * ta1
     talpha2 = tc2 * ta2
     error = f1*talpha1 + f2*talpha2
     error = np.abs(error.conjugate() * error)
     # Nasty issue in the powell method, the convergence on tol is given
     # relative to the solution (0.0).  Only a small number is added.
     # So we shift the solution by 1.0, the tol is now relative to 1.0
     return 1.0+error
Example #23
def m_step_Q(emd, stationary):
    """
    Computes the optimised state-transition covariance hyperparameters `Q' of
    the natural parameters of the posterior distributions over time. Here
    just one single scalar is considered

    :param container.EMData emd:
        All data pertaining to the EM algorithm.
    :param stationary:
        If 'all' stationary on all thetas is assumed.
    """
    inv_lmbda = 0
    if emd.param_est_eta == 'exact':
        for i in range(1, emd.T):
            lag_one_covariance = emd.sigma_s_lag[i, :, :]
            tmp = emd.theta_s[i, :] - emd.theta_s[i - 1, :]
            inv_lmbda += numpy.trace(emd.sigma_s[i, :, :]) - \
                         2 * numpy.trace(lag_one_covariance) + \
                         numpy.trace(emd.sigma_s[i - 1, :, :]) + \
                         numpy.dot(tmp, tmp)
        emd.Q = inv_lmbda / emd.D / (emd.T - 1) * numpy.identity(emd.D)
    else:
        for i in range(1, emd.T):
            lag_one_covariance = emd.sigma_s_lag[i, :]
            tmp = emd.theta_s[i, :] - emd.theta_s[i - 1, :]
            inv_lmbda += numpy.sum(emd.sigma_s[i]) - \
                         2 * numpy.sum(lag_one_covariance) + \
                         numpy.sum(emd.sigma_s[i - 1]) + \
                         numpy.dot(tmp, tmp)
        emd.Q = inv_lmbda / emd.D / (emd.T - 1) * \
                numpy.identity(emd.D)
    if stationary == 'all':
        emd.Q = numpy.zeros(emd.Q.shape)
Example #24
File: test_utpm.py Project: eteq/algopy
    def test_pullback(self):
        (D,P,N) = 2,5,10
        A_data = numpy.zeros((D,P,N,N))
        for d in range(D):
            for p in range(P):
                tmp = numpy.random.rand(N,N)
                A_data[d,p,:,:] = numpy.dot(tmp.T,tmp)

                if d == 0:
                    A_data[d,p,:,:] += N * numpy.diag(numpy.random.rand(N))

        A = UTPM(A_data)
        l,Q = UTPM.eigh(A)

        L_data = UTPM._diag(l.data)
        L = UTPM(L_data)

        assert_array_almost_equal(UTPM.dot(Q, UTPM.dot(L,Q.T)).data, A.data, decimal = 13)

        lbar = UTPM(numpy.random.rand(*(D,P,N)))
        Qbar = UTPM(numpy.random.rand(*(D,P,N,N)))

        Abar = UTPM.pb_eigh( lbar, Qbar, A, l, Q)

        Abar = Abar.data[0,0]
        Adot = A.data[1,0]

        Lbar = UTPM._diag(lbar.data)[0,0]
        Ldot = UTPM._diag(l.data)[1,0]

        Qbar = Qbar.data[0,0]
        Qdot = Q.data[1,0]

        assert_almost_equal(numpy.trace(numpy.dot(Abar.T, Adot)), numpy.trace( numpy.dot(Lbar.T, Ldot) + numpy.dot(Qbar.T, Qdot)))
Example #25
File: test_utpm.py Project: eteq/algopy
    def test_pullback_repeated_eigenvalues(self):
        D,P,N = 2,1,6
        A = UTPM(numpy.zeros((D,P,N,N)))
        V = UTPM(numpy.random.rand(D,P,N,N))

        A.data[0,0] = numpy.diag([2,2,3,3.,4,5])
        A.data[1,0] = numpy.diag([5,1,3,1.,1,3])

        V,Rtilde = UTPM.qr(V)
        A = UTPM.dot(UTPM.dot(V.T, A), V)

        l,Q = UTPM.eigh(A)

        L_data = UTPM._diag(l.data)
        L = UTPM(L_data)

        assert_array_almost_equal(UTPM.dot(Q, UTPM.dot(L,Q.T)).data, A.data, decimal = 13)

        lbar = UTPM(numpy.random.rand(*(D,P,N)))
        Qbar = UTPM(numpy.random.rand(*(D,P,N,N)))

        Abar = UTPM.pb_eigh( lbar, Qbar, A, l, Q)

        Abar = Abar.data[0,0]
        Adot = A.data[1,0]

        Lbar = UTPM._diag(lbar.data)[0,0]
        Ldot = UTPM._diag(l.data)[1,0]

        Qbar = Qbar.data[0,0]
        Qdot = Q.data[1,0]

        assert_almost_equal(numpy.trace(numpy.dot(Abar.T, Adot)), numpy.trace( numpy.dot(Lbar.T, Ldot) + numpy.dot(Qbar.T, Qdot)))
Example #26
File: test_uhf.py Project: eronca/pyscf
 def test_det_ovlp(self):
     mf = scf.UHF(mol)
     mf.scf()
     s, x = mf.det_ovlp(mf.mo_coeff, mf.mo_coeff, mf.mo_occ, mf.mo_occ)
     self.assertAlmostEqual(s, 1.000000000, 9)
     self.assertAlmostEqual(numpy.trace(x[0]), mf.nelec[0]*1.000000000, 9)
     self.assertAlmostEqual(numpy.trace(x[1]), mf.nelec[1]*1.000000000, 9)
Example #27
File: test_utpm.py Project: eteq/algopy
    def test_eigh1_pushforward(self):
        (D,P,N) = 2,1,2
        A = UTPM(numpy.zeros((D,P,N,N)))
        A.data[0,0] = numpy.eye(N)
        A.data[1,0] = numpy.diag([3,4])

        L,Q,b = UTPM.eigh1(A)

        assert_array_almost_equal(UTPM.dot(Q, UTPM.dot(L,Q.T)).data, A.data, decimal = 13)

        Lbar = UTPM.diag(UTPM(numpy.zeros((D,P,N))))
        Lbar.data[0,0] = [0.5,0.5]
        Qbar = UTPM(numpy.random.rand(*(D,P,N,N)))

        Abar = UTPM.pb_eigh1( Lbar, Qbar, None, A, L, Q, b)

        Abar = Abar.data[0,0]
        Adot = A.data[1,0]

        Lbar = Lbar.data[0,0]
        Ldot = L.data[1,0]

        Qbar = Qbar.data[0,0]
        Qdot = Q.data[1,0]

        assert_almost_equal(numpy.trace(numpy.dot(Abar.T, Adot)), numpy.trace( numpy.dot(Lbar.T, Ldot) + numpy.dot(Qbar.T, Qdot)))
Example #28
 def maxwell_sihvola(self,dielectric_medium,dielecv,shape,L,vf) :
     """Calculate the effective constant permittivity using the maxwell garnett method 
        dielectric_medium is the dielectric constant tensor of the medium
        dielecv is the total frequency dielectric constant tensor at the current frequency
        shape is the name of the current shape
        L is the shapes depolarisation matrix
        vf is the volume fraction of filler
        The routine returns the effective dielectric constant"""
     # Equation 6.29 on page 123 of Sihvola
     # Equation 6.40 gives the averaging over the orientation function
     # See also equation 5.80 on page 102 and equation 4.31 on page 70
     Me = dielectric_medium
     # assume that the medium is isotropic calculate the inverse of the dielectric
     Mem1 = 3.0 / np.trace(Me)
     Mi = dielecv
     # calculate the polarisability matrix x the number density of inclusions
     nA = vf*np.dot( (Mi-Me), np.linalg.inv( self.unit + (Mem1 * np.dot(L, (Mi - Me)))))
     nAL = np.dot((nA),L)
     # average the polarisability over orientation
     nA = np.trace(nA) / 3.0 * self.unit
     # average the polarisability*L over orientation
     nAL = np.trace(nAL) / 3.0 * Mem1 * self.unit
     # Calculate the average polarisation factor which scales the average field 
     # based on equation 5.80
     # <P> = pol . <E>
     pol = np.dot(np.linalg.inv(self.unit - nAL), nA)
     # Meff . <E> = Me . <E> + <P>
     # Meff . <E> = Me. <E> + pol . <E>
     # Meff = Me + pol
     effd         = dielectric_medium + pol
     # Average over orientation
     trace = np.trace(effd) / 3.0 
     effdielec = np.array ( [ [trace, 0, 0], [0,trace,0], [0,0,trace] ] )
     return effdielec 
Example #29
        def grad_nlogprob(hypers):
            amp2  = np.exp(hypers[0])
            noise = np.exp(hypers[1])
            ls    = np.exp(hypers[2:])

            chol, corr, grad_corr = memoize(amp2, noise, ls)
            solve   = spla.cho_solve((chol, True), diffs)
            inv_cov = spla.cho_solve((chol, True), np.eye(chol.shape[0]))

            jacobian = np.outer(solve, solve) - inv_cov

            grad = np.zeros(self.D + 2)

            # Log amplitude gradient.
            grad[0] = 0.5 * np.trace(np.dot( jacobian, corr + 1e-6*np.eye(chol.shape[0]))) * amp2

            # Log noise gradient.
            grad[1] = 0.5 * np.trace(np.dot( jacobian, np.eye(chol.shape[0]))) * noise

            # Log length scale gradients.
            for dd in range(self.D):
                grad[dd+2] = 1 * np.trace(np.dot( jacobian, -amp2*grad_corr[:,:,dd]*comp[:,dd][:,np.newaxis]/(np.exp(ls[dd]))))*np.exp(ls[dd])

            # Roll in the prior variance.
            #grad -= 2*hypers/self.hyper_prior

            return -grad
Example #30
    def _prepare_data(self, k_point=None):
        """
        Sets all necessary fields for 1D calculations. Sorts atom indices to improve parallelism.
        :returns: number of atoms, sorted atom indices
        """
        # load powder data for one k
        clerk = AbinsModules.IOmodule(input_filename=self._input_filename,
                                      group_name=AbinsModules.AbinsParameters.powder_data_group)
        powder_data = clerk.load(list_of_datasets=["powder_data"])
        self._a_tensors = powder_data["datasets"]["powder_data"]["a_tensors"][k_point]
        self._b_tensors = powder_data["datasets"]["powder_data"]["b_tensors"][k_point]
        self._a_traces = np.trace(a=self._a_tensors, axis1=1, axis2=2)
        self._b_traces = np.trace(a=self._b_tensors, axis1=2, axis2=3)

        # load dft data for one k point
        clerk = AbinsModules.IOmodule(input_filename=self._input_filename,
                                      group_name=AbinsModules.AbinsParameters.ab_initio_group)
        dft_data = clerk.load(list_of_datasets=["frequencies", "weights"])

        frequencies = dft_data["datasets"]["frequencies"][int(k_point)]
        indx = frequencies > AbinsModules.AbinsConstants.ACOUSTIC_PHONON_THRESHOLD
        self._fundamentals_freq = frequencies[indx]

        self._weight = dft_data["datasets"]["weights"][int(k_point)]

        # free memory
        gc.collect()
Example #31
def _set_next_inital_step_size(theta_k1, theta_k):
    num = np.trace((theta_k1 - theta_k) @ (theta_k1 - theta_k))
    den = np.trace((theta_k1 - theta_k)
                   @ (np.linalg.inv(theta_k) - np.linalg.inv(theta_k1)))
    return num / den
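This has the form of a Barzilai-Borwein initial step size for a log-det objective, where the step is s = theta_k1 - theta_k and the gradient difference is inv(theta_k) - inv(theta_k1). A minimal sketch with two symmetric positive definite iterates (numpy assumed):

import numpy as np

theta_k = np.eye(3)
theta_k1 = np.diag([1.5, 1.2, 0.9])
print(_set_next_inital_step_size(theta_k1, theta_k))  # positive scalar step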
Example #32
def oriented_bounds(obj, angle_digits=1, ordered=True, normal=None):
    """
    Find the oriented bounding box for a Trimesh

    Parameters
    ----------
    obj : trimesh.Trimesh, (n, 2) float, or (n, 3) float
       Mesh object or points in 2D or 3D space
    angle_digits : int
       How much angular precision do we want on our result.
       Even with less precision the returned extents will cover
       the mesh albeit with larger than minimal volume, and may
       experience substantial speedups.
    ordered : bool
      Return a consistent order for bounds
    normal : None or (3,) float
      Override search for normal on 3D meshes

    Returns
    ----------
    to_origin : (4,4) float
      Transformation matrix which will move the center of the
      bounding box of the input mesh to the origin.
    extents: (3,) float
      The extents of the mesh once transformed with to_origin
    """

    # extract a set of convex hull vertices and normals from the input
    # we bother to do this to avoid recomputing the full convex hull if
    # possible
    if hasattr(obj, 'convex_hull'):
        # if we have been passed a mesh, use its existing convex hull to pull from
        # cache rather than recomputing. This version of the cached convex hull has
        # normals pointing in arbitrary directions (straight from qhull)
        # using this avoids having to compute the expensive corrected normals
        # that mesh.convex_hull uses since normal directions don't matter here
        vertices = obj.convex_hull.vertices
        hull_normals = obj.convex_hull.face_normals
    elif util.is_sequence(obj):
        # we've been passed a list of points
        points = np.asanyarray(obj)
        if util.is_shape(points, (-1, 2)):
            return oriented_bounds_2D(points)
        elif util.is_shape(points, (-1, 3)):
            hull_obj = spatial.ConvexHull(points)
            vertices = hull_obj.points[hull_obj.vertices]
            hull_normals, valid = triangles.normals(
                hull_obj.points[hull_obj.simplices])
        else:
            raise ValueError('Points are not (n,3) or (n,2)!')
    else:
        raise ValueError(
            'Oriented bounds must be passed a mesh or a set of points!')

    # convert face normals to spherical coordinates on the upper hemisphere
    # the vector_hemisphere call effectively merges negative but otherwise
    # identical vectors
    spherical_coords = util.vector_to_spherical(
        util.vector_hemisphere(hull_normals))
    # the unique_rows call on merge angles gets unique spherical directions to check
    # we get a substantial speedup in the transformation matrix creation
    # inside the loop by converting to angles ahead of time
    spherical_unique = grouping.unique_rows(spherical_coords,
                                            digits=angle_digits)[0]
    min_volume = np.inf
    tic = util.now()

    # matrices which will rotate each hull normal to [0,0,1]
    if normal is None:
        matrices = [np.linalg.inv(transformations.spherical_matrix(*s))
                    for s in spherical_coords[spherical_unique]]
    else:
        # if explicit normal was passed use it
        matrices = [geometry.align_vectors(normal, [0, 0, 1])]

    for to_2D in matrices:
        # apply the transform here
        projected = np.dot(to_2D, np.column_stack(
            (vertices, np.ones(len(vertices)))).T).T[:, :3]

        height = projected[:, 2].ptp()
        rotation_2D, box = oriented_bounds_2D(projected[:, :2])
        volume = np.prod(box) * height
        if volume < min_volume:
            min_volume = volume
            min_extents = np.append(box, height)
            min_2D = to_2D.copy()
            rotation_2D[:2, 2] = 0.0
            rotation_Z = transformations.planar_matrix_to_3D(rotation_2D)

    # combine the 2D OBB transformation with the 2D projection transform
    to_origin = np.dot(rotation_Z, min_2D)

    # transform points using our matrix to find the translation for the
    # transform
    transformed = transformations.transform_points(vertices,
                                                   to_origin)
    box_center = (transformed.min(axis=0) + transformed.ptp(axis=0) * .5)
    to_origin[:3, 3] = -box_center

    # return ordered 3D extents
    if ordered:
        # sort the three extents
        order = min_extents.argsort()
        # generate a matrix which will flip transform
        # to match the new ordering
        flip = np.eye(4)
        flip[:3, :3] = -np.eye(3)[order]

        # make sure transform isn't mangling triangles
        # by reversing windings on triangles
        if np.isclose(np.trace(flip[:3, :3]), 0.0):
            flip[:3, :3] = np.dot(flip[:3, :3], -np.eye(3))

        # apply the flip to the OBB transform
        to_origin = np.dot(flip, to_origin)
        # apply the order to the extents
        min_extents = min_extents[order]

    log.debug('oriented_bounds checked %d vectors in %0.4fs',
              len(spherical_unique),
              util.now() - tic)

    return to_origin, min_extents
Example #33
File: Imagenes.py Project: pcs-tfg/tfg
def plot_confusion_matrix(cm,
                          target_names,
                          title='Confusion matrix',
                          cmap=None,
                          normalize=True):
    """
    given a sklearn confusion matrix (cm), make a nice plot

    Arguments
    ---------
    cm:           confusion matrix from sklearn.metrics.confusion_matrix

    target_names: given classification classes such as [0, 1, 2]
                  the class names, for example: ['high', 'medium', 'low']

    title:        the text to display at the top of the matrix

    cmap:         the gradient of the values displayed from matplotlib.pyplot.cm
                  see http://matplotlib.org/examples/color/colormaps_reference.html
                  plt.get_cmap('jet') or plt.cm.Blues

    normalize:    If False, plot the raw numbers
                  If True, plot the proportions

    Usage
    -----
    plot_confusion_matrix(cm           = cm,                  # confusion matrix created by
                                                              # sklearn.metrics.confusion_matrix
                          normalize    = True,                # show proportions
                          target_names = y_labels_vals,       # list of names of the classes
                          title        = best_estimator_name) # title of graph

    Citation
    ---------
    http://scikit-learn.org/stable/auto_examples/model_selection/plot_confusion_matrix.html

    """

    accuracy = np.trace(cm) / float(np.sum(cm))
    misclass = 1 - accuracy

    if cmap is None:
        cmap = plt.get_cmap('Blues')

    plt.figure(figsize=(8, 6))
    plt.imshow(cm, interpolation='nearest', cmap=cmap)
    plt.title(title)
    plt.colorbar()

    if target_names is not None:
        tick_marks = np.arange(len(target_names))
        plt.xticks(tick_marks, target_names, rotation=45)
        plt.yticks(tick_marks, target_names)

    if normalize:
        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]

    thresh = cm.max() / 1.5 if normalize else cm.max() / 2
    for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
        if normalize:
            plt.text(j,
                     i,
                     "{:0.4f}".format(cm[i, j]),
                     horizontalalignment="center",
                     color="white" if cm[i, j] > thresh else "black")
        else:
            plt.text(j,
                     i,
                     "{:,}".format(cm[i, j]),
                     horizontalalignment="center",
                     color="white" if cm[i, j] > thresh else "black")

    plt.tight_layout()
    plt.ylabel('Valores reales')
    plt.xlabel('Predicciones\nprecision={:0.4f}; errores={:0.4f}'.format(
        accuracy, misclass))
    plt.show()
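A usage sketch (assumes matplotlib.pyplot as plt, numpy as np, and itertools are imported, as the function body implies; the counts are made up):

import numpy as np

cm = np.array([[13, 1, 0],
               [2, 10, 3],
               [0, 2, 11]])
plot_confusion_matrix(cm, target_names=['high', 'medium', 'low'], normalize=False)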
Example #34
def solve(mol,
          nel,
          cf_core,
          cf_gs,
          ImpOrbs,
          chempot=0.,
          n_orth=0,
          FrozenPot=None):
    # cf_core : core orbitals (in AO basis, assumed orthonormal)
    # cf_gs   : guess orbitals (in AO basis)
    # ImpOrbs : cf_gs -> impurity orbitals transformation
    # n_orth  : number of orthonormal orbitals in cf_gs [1..n_orth]

    mol_ = gto.Mole()
    mol_.build(verbose=0)
    mol_.nelectron = nel
    mol_.incore_anyway = True

    cfx = cf_gs
    Sf = mol.intor_symmetric('cint1e_ovlp_sph')
    Hc  = mol.intor_symmetric('cint1e_kin_sph') \
        + mol.intor_symmetric('cint1e_nuc_sph') \
        + FrozenPot

    occ = np.zeros((cfx.shape[1], ))
    occ[:nel // 2] = 2.

    # core contributions
    dm_core = np.dot(cf_core, cf_core.T) * 2
    jk_core = scf.hf.get_veff(mol, dm_core)
    e_core  =     np.trace(np.dot(Hc, dm_core)) \
            + 0.5*np.trace(np.dot(jk_core, dm_core))

    # transform integrals
    Sp = np.dot(cfx.T, np.dot(Sf, cfx))
    Hp = np.dot(cfx.T, np.dot(Hc, cfx))
    jkp = np.dot(cfx.T, np.dot(jk_core, cfx))
    intsp = ao2mo.outcore.full_iofree(mol, cfx)

    # orthogonalize cf [virtuals]
    cf = np.zeros((cfx.shape[1], ) * 2, )
    if n_orth > 0:
        assert (n_orth <= cfx.shape[1])
        assert (np.allclose(np.eye(n_orth), Sp[:n_orth, :n_orth]))
    else:
        n_orth = 0

    cf[:n_orth, :n_orth] = np.eye(n_orth)
    if n_orth < cfx.shape[1]:
        val, vec = sla.eigh(-Sp[n_orth:, n_orth:])
        idx = -val > 1.e-12
        U = np.dot(vec[:,idx]*1./(np.sqrt(-val[idx])), \
                   vec[:,idx].T)
        cf[n_orth:, n_orth:] = U

    # define ImpOrbs projection
    Xp = np.dot(ImpOrbs, ImpOrbs.T)

    # Si = np.dot(ImpOrbs.T, np.dot(Sp, ImpOrbs))
    # Mp = np.dot(ImpOrbs, np.dot(sla.inv(Si), ImpOrbs.T))
    Np = np.dot(Sp, Xp)
    # print np.allclose(Np, np.dot(Np, np.dot(Mp, Np)))

    # HF calculation
    mol_.energy_nuc = lambda *args: mol.energy_nuc() + e_core
    mf = scf.RHF(mol_)
    #mf.verbose = 4
    mf.mo_coeff = cf
    mf.mo_occ = occ
    mf.get_ovlp = lambda *args: Sp
    mf.get_hcore = lambda *args: Hp + jkp - 0.5 * chempot * (Np + Np.T)
    mf._eri = ao2mo.restore(8, intsp, cfx.shape[1])

    nt = scf.newton(mf)
    #nt.verbose = 4
    nt.max_cycle_inner = 1
    nt.max_stepsize = 0.25
    nt.ah_max_cycle = 32
    nt.ah_start_tol = 1.0e-12
    nt.ah_grad_trust_region = 1.0e8
    nt.conv_tol_grad = 1.0e-6

    nt.kernel()
    cf = nt.mo_coeff
    if not nt.converged:
        raise RuntimeError('hf failed to converge')
    mf.mo_coeff = nt.mo_coeff
    mf.mo_energy = nt.mo_energy
    mf.mo_occ = nt.mo_occ

    # MP2 solution
    mp2solver = mp.DFMP2(mf)
    mp2solver.verbose = 5
    mp2solver.kernel()

    rdm1 = mp2solver.make_rdm1()
    rdm2 = mp2solver.make_rdm2()

    # transform rdm's to original basis
    tei = ao2mo.restore(1, intsp, cfx.shape[1])
    rdm1 = np.dot(cf, np.dot(rdm1, cf.T))
    rdm2 = np.einsum('ai,ijkl->ajkl', cf, rdm2)
    rdm2 = np.einsum('bj,ajkl->abkl', cf, rdm2)
    rdm2 = np.einsum('ck,abkl->abcl', cf, rdm2)
    rdm2 = np.einsum('dl,abcl->abcd', cf, rdm2)

    ImpEnergy = +0.25 *np.einsum('ij,jk,ki->', 2*Hp+jkp, rdm1, Xp) \
                +0.25 *np.einsum('ij,jk,ki->', 2*Hp+jkp, Xp, rdm1) \
                +0.125*np.einsum('ijkl,ijkm,ml->', tei, rdm2, Xp) \
                +0.125*np.einsum('ijkl,ijml,mk->', tei, rdm2, Xp) \
                +0.125*np.einsum('ijkl,imkl,mj->', tei, rdm2, Xp) \
                +0.125*np.einsum('ijkl,mjkl,mi->', tei, rdm2, Xp)

    Nel = np.trace(np.dot(np.dot(rdm1, Sp), Xp))

    return Nel, ImpEnergy
Example #35
 def distance(A, B):
     return np.trace(A - B)**2
Example #36
 def _duality_gap(self, p, theta_k, S, _lambdas):
     return np.trace(S @ theta_k) + self.nsfunc.eval(theta_k, _lambdas) - p
Example #37
 def _quad_approx(self, theta_k1, theta_k, S, t_k):
     return self.sfunc.eval(theta_k, S) + \
            np.trace((theta_k1 - theta_k) @ (S - np.linalg.inv(theta_k))) + \
            (1 / (2 * t_k)) * np.linalg.norm((theta_k1 - theta_k), ord='fro') ** 2
Example #38
File: var.py Project: lleoiu/climopy
                eigrange = [ntime - neof, ntime - 1]  # eigenvalues to get
                covar = (xw @ x.T) / nspace
                l, pc = linalg.eigh(covar,
                                    eigvals=eigrange,
                                    eigvals_only=False)

            # Store in big arrays
            # NOTE: We store projection of PC onto data to get 1 standard
            # deviation associated with the PC rather than actual eigenvector,
            # because eigenvector values may be damped by area weighting.
            pc = (pc - pc.mean(axis=0)) / pc.std(axis=0)  # standardize pcs
            proj = (x.T @ pc) / ntime  # (space by time) x (time by neof)
            pcs[i, :, :, 0] = pc.T[::-1, :]  # neof by time
            projs[i, :, 0, :] = proj.T[::-1, :]  # neof by space
            if percent:  # *percent explained* rather than total
                evals[i, :, 0, 0] = (100.0 * l[::-1] / np.trace(covar))
            else:
                evals[i, :, 0, 0] = l[::-1]  # actual eigenvalues

        # Replace context data with new dimension inserted on left side
        context.replace_data(pcs, projs, evals, nstars, insert_left=1)

    # Return data restored to original dimensionality
    return context.data


def eot(data, neof=5):  # noqa
    """
    EOTs, whatever they are.

    Warning
Example #39
def get_DOP(recv_pos, sat_positions):
    Q = get_Q(recv_pos, sat_positions)
    return np.sqrt(np.trace(Q))
Example #40
def Query_Call(C, D):
    return np.trace(C) + np.trace(D)
Example #41
def calcAngularDistance(gt_rot, pr_rot):
    rotDiff = np.dot(gt_rot, np.transpose(pr_rot))
    trace = np.trace(rotDiff)
    return np.rad2deg(np.arccos((trace - 1.0) / 2.0))
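Sanity check (numpy assumed): a 90-degree rotation about z compared against the identity gives 90.

import numpy as np

gt = np.eye(3)
pr = np.array([[0.0, -1.0, 0.0],
               [1.0, 0.0, 0.0],
               [0.0, 0.0, 1.0]])
print(calcAngularDistance(gt, pr))  # 90.0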
Example #42
def get_HDOP(recv_pos, sat_positions):
    Q = get_Q(recv_pos, sat_positions)
    return np.sqrt(np.trace(Q[:2, :2]))
Example #43
def ndfs(X, **kwargs):
    """
    This function implement unsupervised feature selection using nonnegative spectral analysis, i.e.,
    min_{F,W} Tr(F^T L F) + alpha*(||XW-F||_F^2 + beta*||W||_{2,1}) + gamma/2 * ||F^T F - I||_F^2
    s.t. F >= 0
    
    Input
    -----
    X: {numpy array}, shape (n_samples, n_features)
        input data
    kwargs: {dictionary}
        W: {sparse matrix}, shape {n_samples, n_samples}
            affinity matrix
        alpha: {float}
            Parameter alpha in objective function
        beta: {float}
            Parameter beta in objective function
        gamma: {float}
            a very large number used to force F^T F = I
        F0: {numpy array}, shape (n_samples, n_clusters)
            initialization of the pseudo label matirx F, if not provided
        n_clusters: {int}
            number of clusters
        verbose: {boolean}
            True if user want to print out the objective function value in each iteration, false if not

    Output
    ------
    W: {numpy array}, shape(n_features, n_clusters)
        feature weight matrix
        
    Reference: 
        Li, Zechao, et al. "Unsupervised Feature Selection Using Nonnegative Spectral Analysis." AAAI. 2012.
    """

    # default gamma is 10e8
    if 'gamma' not in kwargs:
        gamma = 10e8
    else:
        gamma = kwargs['gamma']
    # use the default affinity matrix
    if 'W' not in kwargs:
        W = construct_W(X)
    else:
        W = kwargs['W']
    if 'alpha' not in kwargs:
        alpha = 1
    else:
        alpha = kwargs['alpha']
    if 'beta' not in kwargs:
        beta = 1
    else:
        beta = kwargs['beta']
    if 'F0' not in kwargs:
        if 'n_clusters' not in kwargs:
            print("either F0 or n_clusters should be provided", file=sys.stderr)
        else:
            # initialize F
            n_clusters = kwargs['n_clusters']
            F = kmeans_initialization(X, n_clusters)
    else:
        F = kwargs['F0']
    if 'verbose' not in kwargs:
        verbose = False
    else:
        verbose = kwargs['verbose']

    n_samples, n_features = X.shape

    # initialize D as identity matrix
    D = np.identity(n_features)
    I = np.identity(n_samples)

    # build laplacian matrix
    L = np.array(W.sum(1))[:, 0] - W

    max_iter = 1000
    obj = np.zeros(max_iter)
    for iter_step in range(max_iter):
        # update W
        T = np.linalg.inv(
            np.dot(X.transpose(), X) + beta * D + 1e-6 * np.eye(n_features))
        W = np.dot(np.dot(T, X.transpose()), F)
        # update D
        temp = np.sqrt((W * W).sum(1))
        temp[temp < 1e-16] = 1e-16
        temp = 0.5 / temp
        D = np.diag(temp)
        # update M
        M = L + alpha * (I - np.dot(np.dot(X, T), X.transpose()))
        M = (M + M.transpose()) / 2
        # update F
        denominator = np.dot(M,
                             F) + gamma * np.dot(np.dot(F, F.transpose()), F)
        temp = np.divide(gamma * F, denominator)
        F = F * np.array(temp)
        temp = np.diag(np.sqrt(np.diag(1 /
                                       (np.dot(F.transpose(), F) + 1e-16))))
        F = np.dot(F, temp)

        # calculate objective function
        obj[iter_step] = np.trace(np.dot(np.dot(
            F.transpose(), M), F)) + gamma / 4 * np.linalg.norm(
                np.dot(F.transpose(), F) - np.identity(n_clusters), 'fro')

        if verbose:
            print('obj at iter ' + str(iter_step + 1) + ': ' +
                  str(obj[iter_step]))

        if iter_step >= 1 and math.fabs(obj[iter_step] -
                                        obj[iter_step - 1]) < 1e-3:
            break
    return W
Example #44
Hd = (pr * Sy)

Uk = LA.expm(-1j * H0)
U = LA.expm(-1j * Hd)
lx = []
ly = []
lz = []
tt = []
ent = []
#input(basis(2**(Ns-1)))

for i in range(1000):
    psi = dot(Uk, dot(U, psi))
    lx.append(dot(np.conjugate(psi).T, dot(Sx, psi))[0, 0])
    ly.append(dot(np.conjugate(psi).T, dot(Sy, psi))[0, 0])
    lz.append(dot(np.conjugate(psi).T, dot(Sz, psi))[0, 0])
    p = ptr(dot(psi, np.conjugate(psi).T), basis(2**(Ns - 1)), 1, I)
    ent.append(1 - np.trace(dot(p, p)))
    tt.append(i)

plt.plot(tt, lx, 'o', label='x-' + str(th) + '-' + str(phi) + '-' + str(k))
plt.plot(tt, ly, 'o', label='y')
plt.plot(tt, lz, 'o', label='z')
plt.legend()
plt.show()
plt.plot(tt,
         ent,
         'o',
         label=str(pr) + '-' + str(th) + '-' + str(phi) + '-' + str(k))
plt.legend()
plt.show()
Example #45
    def bind(self, ens, beads, nm, cell, bforce, prng):
        """Binds ensemble beads, cell, bforce, and prng to the dynamics.

        This takes a beads object, a cell object, a forcefield object and a
        random number generator object and makes them members of the ensemble.
        It also then creates the objects that will hold the data needed in the
        ensemble algorithms and the dependency network. Note that the conserved
        quantity is defined in the init, but as each ensemble has a different
        conserved quantity the dependencies are defined in bind.

        Args:
            beads: The beads object from which the bead positions are taken.
            nm: A normal modes object used to do the normal modes transformation.
            cell: The cell object from which the system box is taken.
            bforce: The forcefield object from which the force and virial are
                taken.
            prng: The random number generator object which controls random number
                generation.
        """

        super(Dynamics, self).bind(ens, beads, nm, cell, bforce, prng)

        # Checks if the number of mts levels is equal to the dimensionality of the mts weights.
        if (len(self.nmts) != self.forces.nmtslevels):
            raise ValueError(
                "The number of mts levels for the integrator does not agree with the mts_weights of the force components."
            )

        # Strips off depend machinery for easier referencing.
        dself = dd(self)
        dthrm = dd(self.thermostat)
        dbaro = dd(self.barostat)
        dnm = dd(self.nm)
        dens = dd(self.ensemble)

        # n times the temperature (for path integral partition function)
        dself.ntemp = depend_value(name='ntemp',
                                   func=self.get_ntemp,
                                   dependencies=[dens.temp])

        # fixed degrees of freedom count
        fixdof = len(self.fixatoms) * 3 * self.beads.nbeads
        if self.fixcom:
            fixdof += 3

        # first makes sure that the thermostat has the correct temperature and timestep, then proceeds with binding it.
        dpipe(dself.ntemp, dthrm.temp)

        # depending on the kind, the thermostat might work in the normal mode or the bead representation.
        self.thermostat.bind(beads=self.beads,
                             nm=self.nm,
                             prng=prng,
                             fixdof=fixdof)

        # first makes sure that the barostat has the correct stress and timestep, then proceeds with binding it.
        dpipe(dself.ntemp, dbaro.temp)
        dpipe(dens.pext, dbaro.pext)
        dpipe(dens.stressext, dbaro.stressext)
        self.barostat.bind(beads,
                           nm,
                           cell,
                           bforce,
                           prng=prng,
                           fixdof=fixdof,
                           nmts=len(self.nmts))

        # now that the timesteps are decided, we proceed to bind the integrator.
        self.integrator.bind(self)

        self.ensemble.add_econs(dthrm.ethermo)
        self.ensemble.add_econs(dbaro.ebaro)

        # adds the potential, kinetic energy and the cell jacobian to the ensemble
        self.ensemble.add_xlpot(dbaro.pot)
        self.ensemble.add_xlpot(dbaro.cell_jacobian)
        self.ensemble.add_xlkin(dbaro.kin)

        # applies constraints immediately after initialization.
        self.integrator.pconstraints()

        #!TODO THOROUGH CLEAN-UP AND CHECK
        if self.enstype == "nvt" or self.enstype == "npt" or self.enstype == "nst":
            if self.ensemble.temp < 0:
                raise ValueError(
                    "Negative or unspecified temperature for a constant-T integrator"
                )
            if self.enstype == "npt":
                if type(self.barostat) is Barostat:
                    raise ValueError(
                        "The barostat and its mode have to be specified for constant-p integrators"
                    )
                if self.ensemble.pext < 0:
                    raise ValueError(
                        "Negative or unspecified pressure for a constant-p integrator"
                    )
            elif self.enstype == "nst":
                if np.trace(self.ensemble.stressext) < 0:
                    raise ValueError(
                        "Negative or unspecified stress for a constant-s integrator"
                    )
Example #46
File: utils.py Project: aghriss/TRHPO
def rayleigh_np(X, Lap):
    return np.trace(
        LA.inv(X.T.dot(X) + EPS * np.eye(X.shape[1])).dot(X.T.dot(Lap.dot(X))))
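rayleigh_np evaluates the trace Rayleigh quotient trace(inv(X.T X + EPS*I) (X.T L X)). A hedged sketch; EPS and LA come from the snippet's own module, so here we assume LA = numpy.linalg and a small EPS:

import numpy as np
import numpy.linalg as LA

EPS = 1e-8  # assumed; defined elsewhere in the source module

# Laplacian of a path graph on 4 nodes
Lap = np.array([[1.0, -1.0, 0.0, 0.0],
                [-1.0, 2.0, -1.0, 0.0],
                [0.0, -1.0, 2.0, -1.0],
                [0.0, 0.0, -1.0, 1.0]])
X = np.ones((4, 1))         # constant vector lies in the Laplacian's null space
print(rayleigh_np(X, Lap))  # ~0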
Example #47
def main(argv):

    # start timer
    start = time.time()
    start_localtime = time.localtime()

    if len(argv) > 1:
        config_PSI.image_dir = sys.argv[1] + "/"

    # load data
    labels = pd.read_csv(config_PSI.data_dir + "labels.csv")
    non_image_features = pd.read_csv(config_PSI.data_dir +
                                     "all_non_image_features.csv")

    # create dict for labels
    labels_dict = dict()
    for index, row in labels.iterrows():
        if labels.loc[index, "Diagnosis"] == 1.0:
            labels_dict[labels.loc[index, "RID"]] = 0
        elif labels.loc[index, "Diagnosis"] == 2.0:
            labels_dict[labels.loc[index, "RID"]] = 1
        elif labels.loc[index, "Diagnosis"] == 3.0:
            labels_dict[labels.loc[index, "RID"]] = 2

    # initialization of results dictionary
    results = {
        "test": {
            "mauc": [],
            "bca": [],
            "acc": [],
            "ari": [],
            "auc_converters": [],
            "bca_converters": [],
            "acc_converters": [],
            "ari_converters": [],
            "mauc_non_converters": [],
            "bca_non_converters": [],
            "acc_non_converters": [],
            "ari_non_converters": []
        }
    }

    # get results for all 30 model iterations (different train-validation-test splits)
    for i in range(30):
        iteration = "k" + str(i)
        print(iteration)
        this_dir = config_PSI.new_dir + iteration

        L = os.listdir(this_dir)
        L.sort()
        model = load_model(this_dir + "/" + L[-1], compile=False)
        print("Loading best model for evaluation: ", L[-1])

        # create results dir for this iteration
        results_dir = config_PSI.output_dir + "k" + str(i)
        create_data_directory(results_dir)
        file = open(results_dir + "/results.txt", 'w')

        # get mean and std for this iteration
        mean = np.load(this_dir + "/mean.npy")
        std = np.load(this_dir + "/std.npy")
        if config_PSI.non_image_data == True:
            non_image_mean = np.load(this_dir + "/non_image_mean.npy")
            non_image_std = np.load(this_dir + "/non_image_std.npy")

        # create test generator
        if config_PSI.non_image_data == False:
            test_generator = PSI_DataGenerator(labels["RID"],
                                               labels_dict,
                                               mean,
                                               std,
                                               batch_size=1,
                                               dim=config_PSI.input_shape,
                                               n_channels=1,
                                               n_classes=3,
                                               shuffle=False)
        else:
            test_generator = PSI_DataGenerator_multiple_inputs(
                labels["RID"],
                labels_dict,
                non_image_features,
                mean,
                std,
                non_image_mean,
                non_image_std,
                batch_size=1,
                dim=config_PSI.input_shape,
                dim_non_image=config_PSI.dim_non_imaging_features,
                n_channels=1,
                n_classes=3,
                shuffle=False)

        # make predictions for PSI
        Y_pred = model.predict_generator(test_generator, verbose=0)

        # mauc and bca
        Y_pred_converters = []
        Y_pred_non_converters = []
        y_true = []
        y_true_converters = []
        y_true_non_converters = []
        count = 0
        for id in test_generator.list_IDs:
            y_true.append(labels_dict[id])
            all_links = labels[labels["RID"] == id]
            this_link = all_links.iloc[[0]]
            index = this_link.index.item()
            if this_link.loc[index, "Converter"] == True:
                y_true_converters.append(labels_dict[id])
                Y_pred_converters.append(Y_pred[count, :])
            else:
                y_true_non_converters.append(labels_dict[id])
                Y_pred_non_converters.append(Y_pred[count, :])
            count += 1

        Y_pred_converters = np.asarray(Y_pred_converters)
        Y_pred_non_converters = np.asarray(Y_pred_non_converters)

        y_pred = np.argmax(Y_pred, axis=1)
        y_pred_converters = np.argmax(Y_pred_converters, axis=1)
        y_pred_non_converters = np.argmax(Y_pred_non_converters, axis=1)

        # calculate MAUC based on prediction estimates
        nrSubj = Y_pred.shape[0]
        nrClasses = Y_pred.shape[1]
        hardEstimClass = -1 * np.ones(nrSubj, int)
        zipTrueLabelAndProbs = []
        for s in range(nrSubj):
            pCN = Y_pred[s, 0]
            pMCI = Y_pred[s, 1]
            pAD = Y_pred[s, 2]
            hardEstimClass[s] = np.argmax([pCN, pMCI, pAD])
            zipTrueLabelAndProbs += [(y_true[s], [pCN, pMCI, pAD])]
        zipTrueLabelAndProbs = list(zipTrueLabelAndProbs)
        mauc = MAUC(zipTrueLabelAndProbs, nrClasses)

        # calculate BCA based on prediction estimates
        true_labels = np.asarray(y_true)
        bca = calcBCA(hardEstimClass, true_labels, nrClasses)
        conf = confusion_matrix(hardEstimClass, true_labels, labels=[0, 1, 2])
        file.write("Confusion matrix TEST set:\n")
        file.write(np.array2string(conf, separator=', '))
        print(conf)
        acc = np.trace(conf) / float(np.sum(conf))
        ari = adjusted_rand_score(hardEstimClass, true_labels)

        # calculate MAUC based on prediction estimates ONLY FOR CONVERTERS
        nrSubj = Y_pred_converters.shape[0]
        nrClasses = Y_pred_converters.shape[1]
        hardEstimClass = -1 * np.ones(nrSubj, int)
        zipTrueLabelAndProbs = []
        for s in range(nrSubj):
            pCN = Y_pred_converters[s, 0]
            pMCI = Y_pred_converters[s, 1]
            pAD = Y_pred_converters[s, 2]
            hardEstimClass[s] = np.argmax([pCN, pMCI, pAD])
            zipTrueLabelAndProbs += [(y_true_converters[s] - 1, [pMCI, pAD])]
        zipTrueLabelAndProbs = list(zipTrueLabelAndProbs)
        try:
            auc_converters = MAUC(zipTrueLabelAndProbs, nrClasses - 1)
        except Exception:
            print("MAUC could not be calculated")
            auc_converters = 0

        # calculate BCA based on prediction estimates
        true_labels = np.asarray(y_true_converters)
        try:
            bca_converters = calcBCA(hardEstimClass, true_labels, nrClasses)
        except Exception:
            print("BCA could not be calculated")
            bca_converters = 0
        conf_converters = confusion_matrix(hardEstimClass, true_labels,
                                           labels=[0, 1, 2])
        file.write("Confusion matrix TEST set CONVERTERS:\n")
        file.write(np.array2string(conf_converters, separator=', '))
        print(conf_converters)
        acc_converters = np.trace(conf_converters) / float(
            np.sum(conf_converters))
        try:
            ari_converters = adjusted_rand_score(hardEstimClass, true_labels)
        except Exception:
            print("ARI could not be calculated")
            ari_converters = 0

        # calculate MAUC based on prediction estimates ONLY FOR NON CONVERTERS
        nrSubj = Y_pred_non_converters.shape[0]
        nrClasses = Y_pred_non_converters.shape[1]
        hardEstimClass = -1 * np.ones(nrSubj, int)
        zipTrueLabelAndProbs = []
        for s in range(nrSubj):
            pCN = Y_pred_non_converters[s, 0]
            pMCI = Y_pred_non_converters[s, 1]
            pAD = Y_pred_non_converters[s, 2]
            hardEstimClass[s] = np.argmax([pCN, pMCI, pAD])
            zipTrueLabelAndProbs += [(y_true_non_converters[s],
                                      [pCN, pMCI, pAD])]
        zipTrueLabelAndProbs = list(zipTrueLabelAndProbs)
        mauc_non_converters = MAUC(zipTrueLabelAndProbs, nrClasses)

        # calculate BCA based on prediction estimates
        true_labels = np.asarray(y_true_non_converters)
        bca_non_converters = calcBCA(hardEstimClass, true_labels, nrClasses)
        conf_non_converters = confusion_matrix(hardEstimClass, true_labels,
                                               labels=[0, 1, 2])
        file.write("Confusion matrix TEST set NON CONVERTERS:\n")
        file.write(np.array2string(conf_non_converters, separator=', '))
        print(conf_non_converters)
        acc_non_converters = np.trace(conf_non_converters) / float(
            np.sum(conf_non_converters))
        ari_non_converters = adjusted_rand_score(hardEstimClass, true_labels)

        # results["test"]["loss"].append(score[0])
        results["test"]["mauc"].append(mauc)
        results["test"]["bca"].append(bca)
        results["test"]["acc"].append(acc)
        results["test"]["ari"].append(ari)
        results["test"]["auc_converters"].append(auc_converters)
        results["test"]["bca_converters"].append(bca_converters)
        results["test"]["acc_converters"].append(acc_converters)
        results["test"]["ari_converters"].append(ari_converters)
        results["test"]["mauc_non_converters"].append(mauc_non_converters)
        results["test"]["bca_non_converters"].append(bca_non_converters)
        results["test"]["acc_non_converters"].append(acc_non_converters)
        results["test"]["ari_non_converters"].append(ari_non_converters)

        # report test results
        test_results = f"\nTest\n    MAUC: {mauc:.4f}\n    BCA: {bca:.4f}\n    ACC: {acc:.4f}\n    ARI: {ari:.4f}\n    AUC CONVERTERS: {auc_converters:.4f}\n    BCA CONVERTERS: {bca_converters:.4f}\n    ACC CONVERTERS: {acc_converters:.4f}\n    ARI CONVERTERS: {ari_converters:.4f}\n    MAUC NON CONVERTERS: {mauc_non_converters:.4f}\n    BCA NON CONVERTERS: {bca_non_converters:.4f}\n    ACC NON CONVERTERS: {acc_non_converters:.4f}\n    ARI NON CONVERTERS: {ari_non_converters:.4f}\n    "
        file.write(test_results)
        print(test_results)

        file.close()

    # end timer
    end = time.time()
    end_localtime = time.localtime()

    np.save(config_PSI.output_dir + "results.npy", results)
    save_results_PSI(results, start, start_localtime, end, end_localtime)

    test_results = {}
    test_results['mauc_mean'] = np.mean(results['test']['mauc'])
    test_results['mauc_std'] = np.std(results['test']['mauc'])
    test_results['bca_mean'] = np.mean(results['test']['bca'])
    test_results['bca_std'] = np.std(results['test']['bca'])
    test_results['acc_mean'] = np.mean(results['test']['acc'])
    test_results['acc_std'] = np.std(results['test']['acc'])
    test_results['ari_mean'] = np.mean(results['test']['ari'])
    test_results['ari_std'] = np.std(results['test']['ari'])

    # CONVERTERS
    test_results['auc_mean_converters'] = np.mean(
        results['test']['auc_converters'])
    test_results['auc_std_converters'] = np.std(
        results['test']['auc_converters'])
    test_results['bca_mean_converters'] = np.mean(
        results['test']['bca_converters'])
    test_results['bca_std_converters'] = np.std(
        results['test']['bca_converters'])
    test_results['acc_mean_converters'] = np.mean(
        results['test']['acc_converters'])
    test_results['acc_std_converters'] = np.std(
        results['test']['acc_converters'])
    test_results['ari_mean_converters'] = np.mean(
        results['test']['ari_converters'])
    test_results['ari_std_converters'] = np.std(
        results['test']['ari_converters'])

    # NON CONVERTERS
    test_results['mauc_mean_non_converters'] = np.mean(
        results['test']['mauc_non_converters'])
    test_results['mauc_std_non_converters'] = np.std(
        results['test']['mauc_non_converters'])
    test_results['bca_mean_non_converters'] = np.mean(
        results['test']['bca_non_converters'])
    test_results['bca_std_non_converters'] = np.std(
        results['test']['bca_non_converters'])
    test_results['acc_mean_non_converters'] = np.mean(
        results['test']['acc_non_converters'])
    test_results['acc_std_non_converters'] = np.std(
        results['test']['acc_non_converters'])
    test_results['ari_mean_non_converters'] = np.mean(
        results['test']['ari_non_converters'])
    test_results['ari_std_non_converters'] = np.std(
        results['test']['ari_non_converters'])

    print('mean mauc:', '%.3f' % test_results['mauc_mean'], ' (',
          '%.3f' % test_results['mauc_std'], ')')
    print('mean bca:', '%.3f' % test_results['bca_mean'], ' (',
          '%.3f' % test_results['bca_std'], ')')
    print('mean acc:', '%.3f' % test_results['acc_mean'], ' (',
          '%.3f' % test_results['acc_std'], ')')
    print('mean ari:', '%.3f' % test_results['ari_mean'], ' (',
          '%.3f' % test_results['ari_std'], ')')
    print('mean auc converters:', '%.3f' % test_results['auc_mean_converters'],
          ' (', '%.3f' % test_results['auc_std_converters'], ')')
    print('mean bca converters:', '%.3f' % test_results['bca_mean_converters'],
          ' (', '%.3f' % test_results['bca_std_converters'], ')')
    print('mean acc converters:', '%.3f' % test_results['acc_mean_converters'],
          ' (', '%.3f' % test_results['acc_std_converters'], ')')
    print('mean ari converters:', '%.3f' % test_results['ari_mean_converters'],
          ' (', '%.3f' % test_results['ari_std_converters'], ')')
    print('mean mauc non converters:',
          '%.3f' % test_results['mauc_mean_non_converters'], ' (',
          '%.3f' % test_results['mauc_std_non_converters'], ')')
    print('mean bca non converters:',
          '%.3f' % test_results['bca_mean_non_converters'], ' (',
          '%.3f' % test_results['bca_std_non_converters'], ')')
    print('mean acc non converters:',
          '%.3f' % test_results['acc_mean_non_converters'], ' (',
          '%.3f' % test_results['acc_std_non_converters'], ')')
    print('mean ari non converters:',
          '%.3f' % test_results['ari_mean_non_converters'], ' (',
          '%.3f' % test_results['ari_std_non_converters'], ')')

    np.save(config_PSI.output_dir + "test_results.npy", test_results)
Example #48
0
    def _get_irrep_gens(self, k, m, p_u, p_h, p_v):
        """Return generators of the irreps.
        See
            M. Damnjanovic and I. Milosevic,
            "Line Groups in Physics: Theory and Applications to Nanotubes and
            Polymers", Lect. Notes Phys. 801 (Springer, Berlin Heidelberg 2010)
        Args:
            k: k vector in [0, 0.5]; the factor of pi is omitted
            m: angular momentum, [0, 1]
            p_[u, h, v]: parities
        """
        def _E(k, m, p_u, p_h, p_v):
            expik = np.exp(2j * np.pi * k)
            chars = [
                np.array([[expik]]), expik * np.eye(2),
                np.array([[expik, 0], [0, 1 / expik]])
            ]
            return chars

        def _C_2(k, m, p_u, p_h, p_v):
            expm_2 = np.exp(1j * np.pi * m)
            chars = [np.array([[1]]), -np.eye(2), expm_2 * np.eye(2)]
            return chars

        def _U_d(k, m, p_u, p_h, p_v):
            anti_eye = np.matrix('0 1; 1 0')
            chars = [np.array([[p_u]]), anti_eye, anti_eye]
            return chars

        def _sigma_v(k, m, p_u, p_h, p_v):
            chars = [
                np.array([[p_v]]),
                np.matrix('1 0; 0 -1'),
                p_v * np.array([[1., 0], [0, np.exp(1j * np.pi * m)]])
            ]
            return chars

        def _get_irrep(irrep_gen, powers):
            """Return products of generator powers for each irrep."""
            from copy import deepcopy
            irrep_list = deepcopy(irrep_gen[0])
            for gen_i, gen_chars in enumerate(irrep_gen[1:]):
                for irrep_i, irrep in enumerate(gen_chars):
                    irrep_list[irrep_i] = np.mat(irrep_list[irrep_i]) * \
                                          np.mat(irrep) ** powers[gen_i]
            return irrep_list

        irrep_gen = [_E, _C_2, _U_d, _sigma_v]
        irrep_gen = [func(k, m, p_u, p_h, p_v) for func in irrep_gen]
        # print irrep_gen[0][-1]

        char_table = np.zeros((3, len(self.operators)), dtype=np.complex128)

        irrep_list = [[None] * len(self.operators) for _ in range(3)]

        for op_i, op in enumerate(self.operators):
            ind = self.indice[op_i]
            irreps = _get_irrep(irrep_gen, ind)

            for irrep_kind, irrep in enumerate(irreps):
                char_table[irrep_kind, op_i] = np.trace(irrep)
                irrep_list[irrep_kind][op_i] = irrep

        return irrep_list
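A small aside (my own illustration): the entries written into char_table are traces of representation matrices, and a trace is invariant under a change of basis, which is why characters label irreps independently of the chosen basis.

import numpy as np

D = np.array([[0., -1.],
              [1.,  0.]])              # 2x2 representation matrix (90-degree rotation)
S = np.array([[2., 1.],
              [1., 1.]])               # an invertible change of basis
D_prime = np.linalg.inv(S) @ D @ S
print(np.trace(D), np.trace(D_prime))  # equal characters (up to rounding)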
Example #49
0
# assumes numpy as np, PIL's Image, and train_img (ten reference images,
# one template per digit) defined earlier in the original script
result = np.zeros((10, 10), dtype=np.int32)
for i in range(10):
    for j in range(1, 101):
        # load the unknown (test) pattern
        pat_file = "mnist/train/" + str(i) + "/" + str(i) + "_" + str(
            j) + ".jpg"
        pat_img = np.asarray(Image.open(pat_file).convert('L')).astype(
            np.float32)

        # nearest-neighbor classification
        min_val = float('inf')
        ans = 0
        for k in range(10):
            # compute the SSD (sum of squared differences)
            t = train_img[k].flatten()
            p = pat_img.flatten()
            dist = np.dot((t - p).T, (t - p))  # ssd

            # keep the class with the smallest distance
            if dist < min_val:
                min_val = dist
                ans = k

        # record the result
        result[i][ans] += 1
        print(i, j, "->", ans)

print("\n [confusion matrix]")
print(result)
print("\n number of correct answers ->", np.trace(result))
Example #50
0
        # (snippet truncated: EXP1, EXP2, H, SHIFT, SYSTEM, POSITIONS, I_C,
        # CHAINLENGTH, STEPS and `results` are set up earlier in the original script)
        SYSTEM = np.matrix(EXP1) * np.matrix(SYSTEM) * np.matrix(EXP2)
        SYSTEM = np.matrix(H) * np.matrix(SYSTEM) * np.matrix(
            np.matrix.getH(H))
        SYSTEM = np.matrix(SHIFT) * np.matrix(SYSTEM) * np.matrix(
            np.matrix.getH(SHIFT))
    ##################################
    #MEASUREMENT
    p = np.zeros((POSITIONS, 1), dtype=complex)
    for i in range(POSITIONS):
        ket_i = np.zeros((POSITIONS, 1))
        ket_i[i] = 1
        M_i = np.outer(ket_i, ket_i)
        M_P = np.kron(M_i, I_C)
        for q in range(CHAINLENGTH - 1):
            M_P = np.kron(M_P, I_C)
        p[i] = np.trace(M_P * SYSTEM * np.matrix.getH(M_P))

    COPY = list(np.absolute(p[0:POSITIONS:2]))
    LABELS = list(range(-STEPS, STEPS + 2, 2))
    ##################################
    #ANALYSIS
    variance = 0
    mean = 0
    for i in range(0, STEPS + 1):
        mean = mean + COPY[i] * LABELS[i]
    mean = mean / (STEPS + 1)
    for j in range(0, STEPS + 1):
        variance = variance + COPY[j] * (LABELS[j] - mean) * (LABELS[j] - mean)
    results.append(variance)
    ##################################
plt.plot(results)
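A reduced sketch (mine, with made-up dimensions) of the measurement step above: each outcome probability is Tr(M_i rho M_i^dagger) for the projector M_i = |i><i| (x) I acting on the position register only.

import numpy as np

n_pos, n_coin = 2, 2
psi = np.zeros(n_pos * n_coin, dtype=complex)   # pure state |pos=0, coin=0>
psi[0] = 1.0
rho = np.outer(psi, psi.conj())                 # density matrix

I_C = np.eye(n_coin)
for i in range(n_pos):
    ket_i = np.zeros((n_pos, 1))
    ket_i[i] = 1
    M_i = np.kron(np.outer(ket_i, ket_i), I_C)  # project the position register
    p_i = np.trace(M_i @ rho @ M_i.conj().T).real
    print(i, p_i)                               # 1.0 for i=0, 0.0 for i=1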
Example #51
0
def find_best_threshold(data, strategy=None, roc=False):
    # assumes module-level helpers get_conf_matrix/get_auc and the
    # module-level flags `debug` and `multi`
    best_thres, best_acc = None, None
    THRES = 11
    points = []
    points4 = [[] for _ in range(4)]
    for config in itertools.combinations_with_replacement(
            range(THRES),
            len(data[1][0]) - 1):
        prev = 0
        thresholds = []
        for pos in config:
            thresholds.append(pos - prev)
            prev = pos
        thresholds.append(THRES - 1 - prev)
        thresholds = list(map(lambda x: x / (THRES - 1), thresholds))
        if debug:
            print(thresholds)
        conf_matrix = get_conf_matrix(data[0],
                                      data[1],
                                      thresholds,
                                      strategy=strategy)
        normalized = conf_matrix / conf_matrix.sum()

        if multi:
            for i in range(4):
                # 134 x 1000 x 4 and 134 x 4
                one_logits = data[0][:, :, i, None]
                rest_logits = 1 - one_logits
                logits = np.concatenate([one_logits, rest_logits], axis=2)
                one_labels = data[1][:, i, None]
                rest_labels = 1 - one_labels
                labels = np.concatenate([one_labels, rest_labels], axis=1)
                thresholds_ = [thresholds[i], 1 - thresholds[i]]
                conf_matrix_ = get_conf_matrix(logits,
                                               labels,
                                               thresholds_,
                                               strategy=strategy)
                normalized_ = conf_matrix_ / conf_matrix_.sum()
                tp, fp = normalized_[0, :]
                fn, tn = normalized_[1, :]
                tpr = tp / (tp + fn)
                fpr = fp / (fp + tn)
                points4[i].append((fpr, tpr))
        else:
            tp, fp = normalized[0, :]
            fn, tn = normalized[1, :]
            tpr = tp / (tp + fn)
            fpr = fp / (fp + tn)
            points.append((fpr, tpr))

        acc = np.trace(normalized)
        if best_acc is None or acc > best_acc:
            best_acc = acc
            best_thres = thresholds
    if roc:
        if multi:
            for i, points in enumerate(points4):
                get_auc(points, strategy='Class-%d vs All' % (i + 1))
        else:
            get_auc(points, strategy=strategy)
    return best_thres, best_acc
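For reference, a self-contained sketch (mine) of where each ROC point comes from, using the same [[tp, fp], [fn, tn]] layout as the loop above.

import numpy as np

conf = np.array([[80, 10],    # [tp, fp]
                 [20, 90]])   # [fn, tn]
normalized = conf / conf.sum()
tp, fp = normalized[0, :]
fn, tn = normalized[1, :]
tpr = tp / (tp + fn)          # true positive rate: 0.8
fpr = fp / (fp + tn)          # false positive rate: 0.1
acc = np.trace(normalized)    # accuracy is again a trace: 0.85
print(tpr, fpr, acc)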
Example #52
0
def top_to_bottom(data, max_depth, max_epoch, batch_size, lr, u=None):
    dx = data[0][:, 0].size  # Dimension of latent space
    hier_LDS = []  # Create list that will store Linear Dynamics per level in the hierarchy
    hier_nu = []
    losses = []

    x_ols = np.hstack([data[idx][:, :-1] for idx in range(len(data))]).T
    y_ols = np.hstack([data[idx][:, 1:] for idx in range(len(data))]).T

    if u is None:
        du = 1
        u_ols = np.ones((1, x_ols[:, 0].size))
    else:
        u_ols = np.hstack(u)
        du = u_ols[:, 0].size
    x_ols = np.hstack((x_ols, u_ols.T))
    # For the root node, the LDS that minimizes MSE is the OLS estimator
    beta = np.linalg.solve((x_ols.T @ x_ols), x_ols.T @ y_ols)
    hier_LDS.append(np.expand_dims(np.array(beta.T), axis=2))
    
    y_ols = y_ols - x_ols @ beta  # Send residual to be fit by trees deeper down the hierarchy
    losses.append(np.trace(y_ols.T @ y_ols) / y_ols[:, 0].size)
    del beta

    for level in range(1, max_depth):
        print(level)
        K = int(2 ** level)  # number of nodes at current level

        "Initialize parameters of dynamics at current level"
        lds = 1e-5 * npr.rand(dx, dx + du, K) - 1e-5 / 2

        num_hp = int(2 ** (level - 1))  # number of hyperplanes that need to be learned

        "Initialize hyperplanes"
        nu = np.zeros((dx + 1, num_hp))
        nu[:-1, :] = npr.randn(dx, num_hp)
        
        # Compute weights of the ancestral path
        if level == 1:
            ancestor_weights = np.ones((K, x_ols[:, 0].size))
        else:
            ancestor_weights = np.ones((K, x_ols[:, 0].size))
            for j in range(level - 1):
                hyper_planes = np.array(hier_nu[j])
                counter = 0
                temp_array = np.zeros((2 * hyper_planes[0, :].size, x_ols[:, 0].size))
                for k in range(hyper_planes[0, :].size):
                    temp_array[counter, :] = 1/(1+np.exp(-np.matrix(hyper_planes[:, k]) * x_ols.T))
                    temp_array[counter + 1, :] = 1 - temp_array[counter, :]
                    counter += 2
                ancestor_weights = np.multiply(ancestor_weights, np.repeat(temp_array, int(K/temp_array[:, 0].size),
                                                                           axis=0))

        # Optimize
        lds, nu, y_ols, loss = utils.optimize_tree(y_ols, x_ols, lds, nu, ancestor_weights, K, num_hp, max_epoch,
                                       batch_size, lr, 1)
        losses = losses + loss
        hier_LDS.append(np.array(lds.data.numpy()))
        if level != 0:
            hier_nu.append(np.array(nu.data.numpy()))
    
    return hier_LDS, hier_nu, losses
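A minimal sketch (mine) of the root-node fit: solving the normal equations X^T X beta = X^T y gives the least-squares dynamics, and the residual loss is the same trace expression used above.

import numpy as np

rng = np.random.default_rng(1)
X = rng.standard_normal((100, 3))             # regressors: state (+ constant input)
true_beta = np.array([[0.9], [0.1], [0.0]])
y = X @ true_beta + 0.01 * rng.standard_normal((100, 1))

beta = np.linalg.solve(X.T @ X, X.T @ y)      # normal equations, as in the root node
resid = y - X @ beta
mse = np.trace(resid.T @ resid) / y.shape[0]  # the same trace-based loss as above
print(beta.ravel(), mse)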
print("\ndimention of matrix A:", A.ndim)
print("\ndimention of matrix B:", B.ndim)
print("\ndimention of complex matrix:", com.ndim)
#Get the shape of arrays
print("\nshape of matrix A:", A.shape)
print("\nshape of matrix B:", B.shape)
print("\nshape of complex matrix:", com.shape)
#Arithmetic operations using functions
print("\nADDITION of A,B matrices:\n", np.add(A, B))
print("\nSUBTRACTION of A,B matrices:\n", np.subtract(A, B))
print("\nMULTIPLICATION of A,B matrices:\n", np.matmul(A, B))
print("\nDIVISION of A,B matrices\n", np.divide(B, A))
# Rank of a matrix
print("Rank of A:", np.linalg.matrix_rank(A))
# Trace of matrix A
print("\nTrace of A:", np.trace(A))
# Determinant of a matrix
print("\nDeterminant of A:", np.linalg.det(A))
# Inverse of matrix A
print("\nInverse of A:\n", np.linalg.inv(A))
print("\nMatrix A raised to power 3:\n", np.linalg.matrix_power(A, 3))
#calculation of eigenvalues and eigenvectors using the eig() function
a, b = rkv.eig(A)
print("\nEigenvalues of matrix A are:\n", a)
print("\nEigenvectors of matrix A are:\n", b)
#calculation of eigenvalues and eigenvectors using the eigh() function
c, d = rkv.eigh(com)
print("\nEigenvalues of complex matrix are:\n", c)
print("\nEigenvectors of complex matrix are:\n", d)
#Creating vectors
vector1 = 2 + 3j
Example #54
0
# stacking
# REPL-style snippet; assumes `a` is a 2-D array compatible with `b` below
a
b = [[1, 2], [3, 4], [4, 5]]
b

np.vstack((a, b))
np.hstack((a, b))

np.hsplit(a, 2)

# inverse of a array
a = np.random.randint(1, 5, size=(2, 2))
np.linalg.inv(a)

np.eye(3)
np.trace(a)

# solving a quadratic equations
a
c = np.array([1, 2])
np.linalg.solve(a, c)

np.mat('1 2; 3 4')

np.linalg.eigvals(a)
np.linalg.eig(a)  # eigen values and eigen vectors

# singular value decomposition:
u, sigma, v = np.linalg.svd(a, full_matrices=False)
u
sigma
Example #55
0
File: tdscf.py  Project: yfyh2013/pyscf
    def initfockbuild(self):
        """
        Using Roothan's equation to build a Initial Fock matrix and
        Transformation Matrices

        Returns:
            fmat: float or complex
                Fock matrix in Lowdin AO basis
            c_am: float
                Transformation Matrix |AO><MO|
            v_lm: float
                Transformation Matrix |LAO><MO|
        """
        start = time.time()
        n_occ = int(sum(self.ks.mo_occ)/2)
        err = 100
        it = 0
        self.h = self.ks.get_hcore()
        s = self.s.copy()
        x = self.x.copy()
        sx = np.dot(s,x)
        dm_lao = 0.5*transmat(self.ks.get_init_guess(self.ks.mol, \
        self.ks.init_guess), sx).astype(complex)

        if isinstance(self.ks.diis, lib.diis.DIIS):
            self.adiis = self.ks.diis
        elif self.ks.diis:
            self.adiis = diis.SCF_DIIS(self.ks, self.ks.diis_file)
            self.adiis.space = self.ks.diis_space
            self.adiis.rollback = self.ks.diis_space_rollback
        else:
            self.adiis = None

        fmat, jmat, kmat = self.fockbuild(dm_lao)
        dm_lao_old = dm_lao
        etot = self.energy(dm_lao,fmat, jmat, kmat)+ self.enuc

        while (err > self.conv_tol):
            # Diagonalize F in the lowdin basis
            eigs, v_lm = np.linalg.eig(fmat)
            idx = eigs.argsort()
            eigs.sort()
            v_lm = v_lm[:,idx].copy()
            # Fill up the density in the MO basis and then Transform back
            rho = 0.5*np.diag(self.ks.mo_occ).astype(complex)
            dm_lao = transmat(rho,v_lm,-1)
            etot_old = etot
            etot = self.energy(dm_lao,fmat, jmat, kmat)
            fmat, jmat, kmat = self.fockbuild(dm_lao,it)
            err = abs(etot-etot_old)
            logger.debug(self, "Ne: %f", np.trace(rho))
            logger.debug(self, "Iteration: %d         Energy: %.11f      \
            Error = %.11f", it, etot, err)
            it += 1
            if it > self.ks.max_cycle:
                logger.log(self, "Max cycle of SCF reached: %d\n Exiting TDSCF. Please raise ks.max_cycle", it)
                quit()
        rho = 0.5*np.diag(self.ks.mo_occ).astype(complex)
        dm_lao = transmat(rho,v_lm,-1)
        c_am = np.dot(self.x,v_lm)
        logger.log(self, "Ne: %f", np.trace(rho))
        logger.log(self, "Converged Energy: %f", etot)
        # logger.log(self, "Eigenvalues: %f", eigs.real)
        # print "Eigenvalues: ", eigs.real
        end = time.time()
        logger.info(self, "Initial Fock Built time: %f", end-start)
        return fmat, c_am, v_lm
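A tiny aside (my sketch): the `Ne` logged above is the trace of the MO-basis density matrix rho = 0.5 * diag(mo_occ), and since traces are invariant under the similarity transform applied by transmat, the count can be checked in either basis.

import numpy as np

mo_occ = np.array([2.0, 2.0, 2.0, 0.0, 0.0])  # three doubly occupied orbitals
rho = 0.5 * np.diag(mo_occ).astype(complex)   # MO-basis density, as in the code
print(np.trace(rho).real)                     # 3.0: one unit per occupied pair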
Example #56
0
def get_accuracy(logits, labels, thresholds, strategy=None):
    conf_matrix = get_conf_matrix(logits,
                                  labels,
                                  thresholds,
                                  strategy=strategy)
    return np.trace(conf_matrix / conf_matrix.sum())
Example #57
0
            # Fourier transform to real space
            pair['Guij_tmp'] +=  Gku[si,sj]*phase*wk # ij gets exp(+i k R) 
            pair['Gdji_tmp'] +=  Gkd[sj,si]/phase*wk # ji gets exp(-i k R)

    # sum-reduce the partial results from the MPI nodes
    for pair in pairs:
        comm.Reduce(pair['Guij_tmp'],pair['Guij'],root=root_node)
        comm.Reduce(pair['Gdji_tmp'],pair['Gdji'],root=root_node)

    if rank==root_node:
        # The Szunyogh-Lichtenstein formula
        for pair in pairs:
            i,j = pair['aiij']
            pair['Jijz'].append( 
                         np.trace(np.dot(
                         np.dot(Hs[i],pair['Guij']),
                         np.dot(Hs[j],pair['Gdji'])
                              )))

#----------------------------------------------------------------------

# evaluation of the contour integral on the root node
# and saving the output of the calculation
if rank==root_node:
    for pair in pairs:
        pair['Jijz'] = np.array(pair['Jijz']) 
        pair['Jij']  = np.trapz(np.imag(pair['Jijz']*cont.we)/(2*np.pi))
    end = timer()
    np.savetxt(args.outfile,
               np.array([ [nl.norm(p['rij']),
                           p['Jij']*sisl.unit_convert('eV','Ry')*1000]+
                          p['aiij']+list(p['offset'])+list(p['rij'])                         
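A short aside (mine): the Szunyogh-Lichtenstein expression above is the trace of a four-matrix product, so any cyclic reordering gives the same value, which can help reuse intermediates.

import numpy as np

rng = np.random.default_rng(2)
Hi, Guij, Hj, Gdji = (rng.standard_normal((4, 4)) for _ in range(4))
t1 = np.trace(Hi @ Guij @ Hj @ Gdji)
t2 = np.trace(Gdji @ Hi @ Guij @ Hj)   # cyclic permutation, same trace
print(np.isclose(t1, t2))              # True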
Example #58
0
"""TRAINING OF THE CLASSIFIER AND CLASSIFICATION OF THE TEST SET"""
#Learning rate
alpha = 2
# TRAINING
theta, J = fTrain_LogisticReg(X_train, Y_train, alpha)
# CLASSIFICATION OF THE TEST SET
Y_test_hat = fClassify_LogisticReg(X_test, theta)
Y_test_pred = Y_test_hat >= 0.5

#Confusion matrix
M = confusion_matrix(Y_test, Y_test_pred)
print(M)

# ACCURACY AND F-SCORE (class 0 is treated as the positive class)

accuracy = np.trace(M) / M.sum()
Precision = M[0, 0] / (M[0, 0] + M[1, 0])
Recall = M[0, 0] / (M[0, 0] + M[0, 1])
FScore = 2 * ((Precision * Recall) / (Precision + Recall))

print('the accuracy :', accuracy * 100)
print('the Fscore :', FScore)
#%%
#the ROC curve and the area under the curve

T, F = ROC(Y_test, Y_test_hat)
L = np.arange(0, 1, 0.001)
plt.plot(F, T)
plt.plot(L, L)
plt.title("ROC curve")
plt.xlabel("False Positive Rate")
Example #59
0
def calcOkadaDisplacementStress(x, y, z, event_srcmod, lambda_lame, mu_lame):
    """Calculate strains and stresses from SRCMOD event with Okada (1992).
        Calculate nine element symmetric elastic strain and stress tensors at
        observation coordinates using Okada (1992).  Dislocation parameters are
        given an event_srcmod dictionary which contains the geometry and slip
        distribution for a given SRCMOD event.
        Inputs:
        x: List of x-coordinates of observation points (meters)
        y: List of y-coordinates of observation points (meters)
        z: List of z-coordinates of observation points (meters, negative down)
        event_srcmod: Dictionary with SRCMOD event parameters for one event
        lambda_lame: Lame's first parameter (Pascals)
        mu_lame: Lame's second parameter, shear modulus (Pascals)
        Returns:
        strains, stresses: Lists of 3x3 numpy arrays with full strain
        and stress tensors at each 3d set of obervation coordinates
        """
    strains = []
    stresses = []
    displacements = []
    mindistance = []

    # Define the material parameter that Okada's Green's functions are sensitive to
    alpha = (lambda_lame + mu_lame) / (lambda_lame + 2 * mu_lame)

    for j in range(len(x)):
        strain = np.zeros((3, 3))
        stress = np.zeros((3, 3))
        displacement = np.zeros([3])
        distance = []
        for i in range(len(event_srcmod['x1'])):

            # Translate and (un)rotate observation coordinates to get a local reference frame in which top edge of fault is aligned with x-axis
            x_rot, y_rot = RotateCoords(x[j], y[j], event_srcmod['x1Utm'][i],
                                        event_srcmod['y1Utm'][i],
                                        -1.0 * event_srcmod['angle'][i])
            # get rotated fault coordinates (otherwise fault patch might be offset on x-axis)
            x2_f, y2_f = RotateCoords(event_srcmod['x2Utm'][i],
                                      event_srcmod['y2Utm'][i],
                                      event_srcmod['x1Utm'][i],
                                      event_srcmod['y1Utm'][i],
                                      -1.0 * event_srcmod['angle'][i])
            x_fault1 = np.min([0., x2_f])
            x_fault2 = np.max([0., x2_f])
            assert (x_fault2 - x_fault1) - event_srcmod['length'][i] < 100
            # Calculate elastic deformation using Okada 1992 (BSSA)
            # Seven arguments to DC3DWrapper are required:
            # alpha = (lambda + mu) / (lambda + 2 * mu)
            # xo = 3-vector representing the observation point (x, y, z in the original)
            # depth = the depth of the fault origin
            # dip = the dip-angle of the rectangular dislocation surface
            # strike_width = the along-strike range of the surface (al1,al2 in the original)
            # dip_width = the along-dip range of the surface (aw1, aw2 in the original)
            # dislocation = 3-vector representing the direction of motion on the surface (DISL1, DISL2, DISL3)
            success, uvec, gradient_tensor = dc3dwrapper(
                alpha,
                [x_rot[0], y_rot[0], z[j]
                 ],  #observation depth has to be negative
                event_srcmod['z1'][i],
                event_srcmod['dip'][i],
                [x_fault1, x_fault2],
                [-1.0 * event_srcmod['width'][i], 0],
                [
                    event_srcmod['slipStrike'][i], event_srcmod['slipDip'][i],
                    0.0
                ])
            # Small-strain tensor: strain = (grad u + grad u^T) / 2
            cur_straintmp = 0.5 * (gradient_tensor.T + gradient_tensor)
            # rotate the strain tensor back to the global frame
            cur_strain = RotateTensor(cur_straintmp,
                                      1.0 * event_srcmod['angle'][i])
            strain += cur_strain
            # Constitutive relationship for isotropic elasticity:
            # stress = lambda * tr(strain) * I + 2 * mu * strain
            stress += (lambda_lame * np.eye(cur_strain.shape[0]) *
                       np.trace(cur_strain) + 2. * mu_lame * cur_strain)
            displacement += RotateDisplacements(uvec,
                                                1.0 * event_srcmod['angle'][i])
            distance.append(
                np.sqrt(np.power(x_rot[0], 2.) + np.power(y_rot[0], 2.)))

        mindistance.append(np.min(np.array(distance)))
        strains.append(strain)
        stresses.append(stress)
        displacements.append(displacement)
    return displacements, strains, stresses, mindistance
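A compact sketch (my own, with assumed Lame parameters) of the constitutive step above, checking that a pure volumetric strain produces an isotropic stress.

import numpy as np

lambda_lame, mu_lame = 3.0e10, 3.0e10   # assumed Pa-scale values for illustration
strain = 1e-6 * np.eye(3)               # pure volumetric strain
stress = (lambda_lame * np.eye(3) * np.trace(strain)
          + 2.0 * mu_lame * strain)
print(stress)                           # diagonal entries: (3*lambda + 2*mu) * 1e-6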
Example #60
0
File: tdscf.py  Project: yfyh2013/pyscf
def trdot(A,B):
    """Trace of a matrix product, Tr(A B)."""
    C = np.trace(np.dot(A,B))
    return C
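One practical note (mine): Tr(AB) needs only the diagonal of AB, so for large matrices it can be computed without forming the full product.

import numpy as np

rng = np.random.default_rng(3)
A = rng.standard_normal((200, 200))
B = rng.standard_normal((200, 200))
t_full = np.trace(np.dot(A, B))       # forms the full product first
t_fast = np.einsum('ij,ji->', A, B)   # sums only the diagonal contributions
print(np.isclose(t_full, t_fast))     # True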