Example #1
def random_cov(d, diff=None):
    """Generate random covariance matrix.
    
    Generates a random covariance matrix, or two dependent covariance matrices
    if the argument `diff` is given.
    
    """
    S = 0.8*np.random.randn(d,d)
    copy_triu_to_tril(S)
    np.fill_diagonal(S,0)
    mineig = linalg.eigvalsh(S, eigvals=(0,0))[0]
    drand = 0.8*np.random.randn(d)
    if mineig < 0:
        S += np.diag(np.exp(drand)-mineig)
    else:
        S += np.diag(np.exp(drand))
    if not diff:
        return S.T
    S2 = S * np.random.randint(2, size=(d,d))*np.exp(diff*np.random.randn(d,d))
    copy_triu_to_tril(S2)
    np.fill_diagonal(S2,0)
    mineig = linalg.eigvalsh(S2, eigvals=(0,0))[0]
    drand += diff*np.random.randn(d)
    if mineig < 0:
        S2 += np.diag(np.exp(drand)-mineig)
    else:
        S2 += np.diag(np.exp(drand))
    return S.T, S2.T
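A minimal smoke test (hypothetical; assumes numpy as np, scipy's linalg, and the copy_triu_to_tril helper are in scope, as in the snippet): the returned matrix should be symmetric positive-definite.

import numpy as np
from scipy import linalg

S = random_cov(4)                      # hypothetical call
assert np.allclose(S, S.T)             # symmetric
assert linalg.eigvalsh(S).min() > 0    # strictly positive spectrum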
Example #2
def _validate_covars(covars, covariance_type, n_components):
    """Do basic checks on matrix covariance sizes and values
    """
    from scipy import linalg

    if covariance_type == "spherical":
        if len(covars) != n_components:
            raise ValueError("'spherical' covars have length n_components")
        elif np.any(covars <= 0):
            raise ValueError("'spherical' covars must be non-negative")
    elif covariance_type == "tied":
        if covars.shape[0] != covars.shape[1]:
            raise ValueError("'tied' covars must have shape (n_dim, n_dim)")
        elif not np.allclose(covars, covars.T) or np.any(linalg.eigvalsh(covars) <= 0):
            raise ValueError("'tied' covars must be symmetric, " "positive-definite")
    elif covariance_type == "diag":
        if len(covars.shape) != 2:
            raise ValueError("'diag' covars must have shape" "(n_components, n_dim)")
        elif np.any(covars <= 0):
            raise ValueError("'diag' covars must be non-negative")
    elif covariance_type == "full":
        if len(covars.shape) != 3:
            raise ValueError("'full' covars must have shape " "(n_components, n_dim, n_dim)")
        elif covars.shape[1] != covars.shape[2]:
            raise ValueError("'full' covars must have shape " "(n_components, n_dim, n_dim)")
        for n, cv in enumerate(covars):
            if not np.allclose(cv, cv.T) or np.any(linalg.eigvalsh(cv) <= 0):
                raise ValueError("component %d of 'full' covars must be " "symmetric, positive-definite" % n)
    else:
        raise ValueError("covariance_type must be one of " + "'spherical', 'tied', 'diag', 'full'")
Example #3
    def test1(self,n,p,prec,matrix_type,method):
        '''
        check the quality of tridiagonalization.
        '''
        if p==2:
            assert(method=='qr' or method=='sqrtm')
        else:
            assert(method=='qr')
        if prec is not None:
            gmpy2.get_context().precision=prec
        assert(matrix_type in ['array','sparse','mpc'])

        v0=self.gen_randv0(n,p,matrix_type='array')
        H0=self.gen_randomH(n,p,matrix_type=matrix_type)
        if p is not None:
            if method=='qr':
                data,offset=tridiagonalize_qr(H0,q=v0,prec=prec)
            else:
                data,offset=tridiagonalize2(H0,q=v0,prec=prec)
        else:
            data,offset=tridiagonalize(H0,q=v0,prec=prec)
        B=construct_tridmat(data,offset).toarray()
        if sps.issparse(H0):
            H0=H0.toarray()
        H0=complex128(H0)
        e1=eigvalsh(H0)
        e2=eigvalsh(B)
        assert_allclose(e1,e2)
Example #4
 def _compute_eigen(self, eigenvalues_only=False, b=None):
     if eigenvalues_only:
         if b is None:
             if self.__e_val is not None: return self.__e_val
             else:
                 self.__e_val = eigvalsh(self._array)
                 return self.__e_val
         else:
             if self.__e_val_b.has_key(hash(str(b))):
                 return self.__e_val_b[hash(str(b))]
             else:
                 self.__e_val_b[hash(str(b))]=eigvalsh(self._array, b)
                 return self.__e_val_b[hash(str(b))]
     else:
         if b is None:
             if self.__e_val is not None and self.__e_vec is not None:
                 return self.__e_val, self.__e_vec
             else:
                 self.__e_val, self.__e_vec = eigh(self._array)
                 return self.__e_val, self.__e_vec
         else:
             if (self.__e_val_b.has_key(hash(str(b))) and
                 self.__e_vec_b.has_key(hash(str(b)))):
                 return self.__e_val_b[hash(str(b))], self.__e_vec_b[hash(str(b))]
             else:
                 self.__e_val_b[hash(str(b))], self.__e_vec_b[hash(str(b))] \
                    = eigh(self._array, b)  # eigh, not eigvalsh: eigenvectors are needed here
                 self.__e_vec_t_b = self.__e_vec_b[hash(str(b))].T
                 return self.__e_val_b[hash(str(b))], self.__e_vec_b[hash(str(b))]
Example #5
def _validate_covars(covars, cvtype, nmix, n_dim):
    from scipy import linalg
    if cvtype == 'spherical':
        if len(covars) != nmix:
            raise ValueError("'spherical' covars must have length nmix")
        elif np.any(covars <= 0):
            raise ValueError("'spherical' covars must be non-negative")
    elif cvtype == 'tied':
        if covars.shape != (n_dim, n_dim):
            raise ValueError("'tied' covars must have shape (n_dim, n_dim)")
        elif (not np.allclose(covars, covars.T)
              or np.any(linalg.eigvalsh(covars) <= 0)):
            raise ValueError("'tied' covars must be symmetric, "
                             "positive-definite")
    elif cvtype == 'diag':
        if covars.shape != (nmix, n_dim):
            raise ValueError("'diag' covars must have shape (nmix, n_dim)")
        elif np.any(covars <= 0):
            raise ValueError("'diag' covars must be non-negative")
    elif cvtype == 'full':
        if covars.shape != (nmix, n_dim, n_dim):
            raise ValueError("'full' covars must have shape "
                             "(nmix, n_dim, n_dim)")
        for n, cv in enumerate(covars):
            if (not np.allclose(cv, cv.T)
                or np.any(linalg.eigvalsh(cv) <= 0)):
                raise ValueError("component %d of 'full' covars must be "
                                 "symmetric, positive-definite" % n)
Example #6
def _dense_eigs(data, isherm, vecs, N, eigvals, num_large, num_small):
    """
    Internal functions for computing eigenvalues and eigenstates for a dense
    matrix.
    """
    if debug:
        logger.debug(inspect.stack()[0][3] + ": vectors = " + str(vecs))

    evecs = None

    if vecs:
        if isherm:
            if eigvals == 0:
                evals, evecs = la.eigh(data)
            else:
                if num_small > 0:
                    evals, evecs = la.eigh(
                        data, eigvals=[0, num_small - 1])
                if num_large > 0:
                    evals, evecs = la.eigh(
                        data, eigvals=[N - num_large, N - 1])
        else:
            evals, evecs = la.eig(data)
    else:
        if isherm:
            if eigvals == 0:
                evals = la.eigvalsh(data)
            else:
                if num_small > 0:
                    evals = la.eigvalsh(data, eigvals=[0, num_small - 1])
                if num_large > 0:
                    evals = la.eigvalsh(data, eigvals=[N - num_large, N - 1])
        else:
            evals = la.eigvals(data)

    _zipped = list(zip(evals, range(len(evals))))
    _zipped.sort()
    evals, perm = list(zip(*_zipped))

    if vecs:
        evecs = np.array([evecs[:, k] for k in perm])

    if not isherm and eigvals > 0:
        if vecs:
            if num_small > 0:
                evals, evecs = evals[:num_small], evecs[:num_small]
            elif num_large > 0:
                evals, evecs = evals[(N - num_large):], evecs[(N - num_large):]
        else:
            if num_small > 0:
                evals = evals[:num_small]
            elif num_large > 0:
                evals = evals[(N - num_large):]

    return np.array(evals), np.array(evecs)
Example #7
def eigbh(cm,bm,return_vecs=True):
    '''
    Get the eigenvalues and eigenvectors for a matrix with block structure specified by a block marker.

    Parameters:
        :cm: csr_matrix/bsr_matrix, the input matrix.
        :bm: <BlockMarkerBase>, the block marker.
        :return_vecs: bool, return the eigenvectors or not.

    Return:
        (eigenvalues,eigenvectors) if return_vecs==True.
        (eigenvalues) if return_vecs==False.
    '''
    EL,UL=[],[]
    is_sparse=sps.issparse(cm)
    for i in xrange(bm.nblock):
        mi=bm.extract_block(cm,(i,i))
        if return_vecs:
            if is_sparse: mi=mi.toarray()
            ei,ui=eigh(mi)
            EL.append(ei)
            UL.append(ui)
        else:
            ei=eigvalsh(mi)
            EL.append(ei)
    if return_vecs:
        return concatenate(EL),sps.block_diag(UL)
    else:
        return concatenate(EL)
Example #8
    def test_nonint(self):
        #get the exact solution.
        h_exact=self.model_exact.hgen.H()
        E_excit=eigvalsh(h_exact)
        Emin_exact=sum(E_excit[E_excit<0])

        #the solution in occupation representation.
        h_occ=self.model_occ.hgen.H()
        Emin=eigsh(h_occ,which='SA',k=1)[0]
        print 'The Ground State Energy for hexagon(t = %s, t2 = %s) is %s, tolerance %s.'%(self.t,self.t2,Emin,Emin-Emin_exact)
        assert_almost_equal(Emin_exact,Emin)

        #the solution through updates
        H_serial=op2collection(op=self.model_occ.hgen.get_opH())
        H=get_H(H=H_serial,hgen=self.expander)
        H2,bm2=get_H_bm(H=H_serial,hgen=self.expander2,bstr='QM')
        Emin=eigsh(H,k=1,which='SA')[0]
        Emin2=eigsh(H2,k=1,which='SA')[0]
        print 'The Ground State Energy is %s, tolerance %s.'%(Emin,Emin-Emin2)
        assert_almost_equal(Emin_exact,Emin)
        assert_almost_equal(Emin_exact,Emin2)

        #the solution through dmrg.
        bmgen=get_bmgen(self.expander3.spaceconfig,'QM')
        dmrgegn=DMRGEngine(hchain=H_serial,hgen=self.expander3,tol=0,bmg=bmgen,symmetric=True)
        EG2=dmrgegn.run_finite(endpoint=(5,'<-',0),maxN=[10,20,30,40,40],tol=0)[-1]
        assert_almost_equal(Emin_exact,EG2*H_serial.nsite,decimal=4)
Example #9
def _gauss_lobatto_mesh(n):
    from numpy import arange, asarray, diag, sqrt, zeros
    from scipy.linalg import eigvalsh

    if n < 2:
        raise ValueError("n must be > 1")
    if n == 2:
        return asarray((-1., 1.)), asarray((1., 1.))

    xi = zeros(n)
    wi = zeros(n)
    Pn = zeros(n)
    i = arange(1, n - 2)
    b = sqrt((i * (2. + i)) / (3. + 4.*i * (2. + i)))  # coeff for Jacobi Poly with a=b=1

    M = diag(b, -1) + diag(b, 1)
    xi[1:n - 1] = eigvalsh(M)
    xi[0] = -1.; xi[-1] = 1.

    Pim2 = 1.  # P_{i-2}
    Pim1 = xi  # P_{i-1}
    for i in range(2, n):  # want P_{n-1}
        wi = (1. / i) * ((2 * i - 1) * xi * Pim1 - (i - 1) * Pim2)
        Pim2 = Pim1
        Pim1 = wi
    wi = 2. / (n * (n - 1) * wi ** 2)
    wi[0] = wi[-1] = 2. / (n * (n - 1))
    return xi, wi
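A quick check of the rule (the function above is self-contained): on [-1, 1] the weights must sum to 2, and an n-point Gauss-Lobatto rule is exact for polynomials up to degree 2n - 3.

import numpy as np

xi, wi = _gauss_lobatto_mesh(5)
print(np.isclose(wi.sum(), 2.0))                  # integral of 1
print(np.isclose((wi * xi**2).sum(), 2.0 / 3.0))  # integral of x**2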
Example #10
def quad_form(x, P):
    x,P = map(Expression.cast_to_const, (x,P))
    # Check dimensions.
    n = P.size[0]
    if P.size[1] != n or x.size != (n,1):
        raise Exception("Invalid dimensions for arguments.")
    if x.curvature.is_constant():
        return x.T*P*x
    elif P.curvature.is_constant():
        np_intf = intf.get_matrix_interface(np.ndarray)
        P = np_intf.const_to_matrix(P.value)
        # Replace P with symmetric version.
        P = (P + P.T)/2
        # Check if P is PSD.
        eigvals = LA.eigvalsh(P)
        if min(eigvals) > 0:
            P_sqrt = Constant(LA.sqrtm(P).real)
            return square(norm2(P_sqrt*x))
        elif max(eigvals) < 0:
            P_sqrt = Constant(LA.sqrtm(-P).real)
            return -square(norm2(P_sqrt*x))
        else:
            raise Exception("P has both positive and negative eigenvalues.")
    else:
        raise Exception("At least one argument to quad_form must be constant.")
Example #11
def bethe_hessian_spectrum(G, r=None):
    """Returns eigenvalues of the Bethe Hessian matrix of G.

    Parameters
    ----------
    G : Graph
       A NetworkX Graph or DiGraph

    r : float
       Regularizer parameter

    Returns
    -------
    evals : NumPy array
      Eigenvalues

    See Also
    --------
    bethe_hessian_matrix

    References
    ----------
    .. [1] A. Saade, F. Krzakala and L. Zdeborová
      "Spectral clustering of graphs with the bethe hessian",
       Advances in Neural Information Processing Systems. 2014.
    """
    from scipy.linalg import eigvalsh
    return eigvalsh(nx.bethe_hessian_matrix(G, r).todense())
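Usage sketch (assumes networkx is installed and the function above is defined):

import networkx as nx

G = nx.karate_club_graph()
evals = bethe_hessian_spectrum(G)
print(evals.min(), evals.max())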
Example #12
 def _posterior(self, X, Y, alpha0, w0):
     '''
     Iteratively reweighted least squares method using l_bfgs_b or newton_cg.
     Finds MAP estimates for weights and Hessian at convergence point
     '''
     n_samples,n_features  = X.shape
     if self.solver == 'lbfgs_b':
         f = lambda w: _logistic_loss_and_grad(w,X[:,:-1],Y,alpha0)
         w = fmin_l_bfgs_b(f, x0 = w0, pgtol = self.tol_solver,
                           maxiter = self.n_iter_solver)[0]
     elif self.solver == 'newton_cg':
         f    = _logistic_loss
         grad = lambda w,*args: _logistic_loss_and_grad(w,*args)[1]
         hess = _logistic_grad_hess               
         args = (X[:,:-1],Y,alpha0)
         w    = newton_cg(hess, f, grad, w0, args=args,
                          maxiter=self.n_iter, tol=self.tol)[0]
     else:
         raise NotImplementedError('Liblinear solver is not yet implemented')
         
     # calculate negative of Hessian at w
     xw    = np.dot(X,w)
     s     = expit(xw)
     R     = s * (1 - s)
     Hess  = np.dot(X.T*R,X)    
     Alpha = np.ones(n_features)*alpha0
     if self.fit_intercept:
         Alpha[-1] = np.finfo(np.float16).eps
     np.fill_diagonal(Hess, np.diag(Hess) + Alpha)
     e  =  eigvalsh(Hess)        
     return w,1./e
Example #13
def check_dfunc(dfunc,nband,D,Gap=0.,method='eval'):
    '''
    Checking for dfunc.

    dfunc:
        the hybridization function.
    nband:
        the number of bands.
    method:
        the method for checking.
        * `pauli` -> pauli decomposition for 2D.
        * `eval` -> check for eigenvalue.
    '''
    ion()
    if nband!=2 and method=='pauli':
        warnings.warn('Checking pauli components is not allowed for non-2band bath!')
        method='eval'
    if ndim(D)==0:
        D=[-D,D]
    wlist=linspace(D[0],D[1],1000)
    if nband==1:
        dl=array([dfunc(w) for w in wlist])
    elif method=='eval':
        dl=array([eigvalsh(dfunc(w)) for w in wlist])
    elif method=='pauli':
        dl=array([s2vec(dfunc(w)) for w in wlist])
    plot(wlist,dl)
    if method=='eval':
        legend([r'$\rho_%s$'%i for i in xrange(nband)])
    elif method=='pauli':
        legend([r'$\rho_0$',r'$\rho_x$',r'$\rho_y$',r'$\rho_z$'])
    pdb.set_trace()
Example #14
    def check_RCF(self):
        """Tests for right canonical form.
        Uses the criteria listed in sub-section 3.1, theorem 1 of arXiv:quant-ph/0608197v2.
        """
        rnsOK = True
        ls_trOK = True
        ls_herm = True
        ls_pos = True
        ls_diag = True

        for n in xrange(1, self.N + 1):
            rnsOK = rnsOK and sp.allclose(
                self.r[n],
                sp.eye(self.r[n].shape[0]),
                atol=self.eps * 2,
                rtol=0)
            ls_herm = ls_herm and sp.allclose(
                self.l[n] - m.H(self.l[n]), 0, atol=self.eps * 2)
            ls_trOK = ls_trOK and sp.allclose(
                sp.trace(self.l[n]), 1, atol=self.eps * 2, rtol=0)
            ls_pos = ls_pos and all(la.eigvalsh(self.l[n]) > 0)
            ls_diag = ls_diag and sp.allclose(self.l[n],
                                              sp.diag(self.l[n].diagonal()))

        normOK = sp.allclose(self.l[self.N], 1., atol=self.eps, rtol=0)

        return (rnsOK, ls_trOK, ls_pos, ls_diag, normOK)
Example #15
def boolean_fermi_surface(h,write=True,output_file="BOOL_FERMI_MAP.OUT",
                    e=0.0,nk=50,nsuper=1,reciprocal=False,
                    delta=None):
  """Calculates the Fermi surface of a 2d system"""
  if h.dimensionality!=2: raise  # continue if two dimensional
  hk_gen = h.get_hk_gen() # gets the function to generate h(k)
  kxs = np.linspace(-nsuper,nsuper,nk)  # generate kx
  kys = np.linspace(-nsuper,nsuper,nk)  # generate ky
  kdos = [] # empty list
  kxout = []
  kyout = []
  if reciprocal: R = h.geometry.get_k2K() # get matrix
  else: R = np.matrix(np.identity(3)) # identity otherwise (R is used below)
  # setup a reasonable value for delta
  if delta is None:
    delta = 8./np.max(np.abs(h.intra))/nk
  for x in kxs:
    for y in kys:
      r = np.matrix([x,y,0.]).T # real space vectors
      k = np.array((R*r).T)[0] # change of basis
      hk = hk_gen(k) # get hamiltonian
      evals = lg.eigvalsh(hk) # diagonalize
      de = np.abs(evals - e) # difference with respect to fermi
      de = de[de<delta] # energies close to fermi
      if len(de)>0: kdos.append(1.0) # add to the list
      else: kdos.append(0.0) # add to the list
      kxout.append(x)
      kyout.append(y)
  if write:  # optionally, write in file
    f = open(output_file,"w") 
    for (x,y,d) in zip(kxout,kyout,kdos):
      f.write(str(x)+ "   "+str(y)+"   "+str(d)+"\n")
    f.close() # close the file
  return (kxout,kyout,kdos) # return result
Example #16
File: gmm.py Project: FNNDSC/nipy
    def unweighted_likelihood_(self, x):
        """
        return the likelihood of each data for each component
        the values are not weighted by the component weights

        Parameters
        ----------
        x: array of shape (n_samples,self.dim)
           the data used in the estimation process

        Returns
        -------
        like, array of shape(n_samples,self.k)
          unweighted component-wise likelihood
        """
        n = x.shape[0]
        like = np.zeros((n, self.k))

        for k in range(self.k):
            # compute the data-independent factor first
            w = - np.log(2 * np.pi) * self.dim
            m = np.reshape(self.means[k], (1, self.dim))
            b = self.precisions[k]
            if self.prec_type == 'full':
                w += np.log(eigvalsh(b)).sum()
                dx = m - x
                q = np.sum(np.dot(dx, b) * dx, 1)
            else:
                w += np.sum(np.log(b))
                q = np.dot((m - x) ** 2, b)
            w -= q
            w /= 2
            like[:, k] = np.exp(w)
        return like
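A standalone cross-check of the 'full'-precision branch against scipy.stats: with precision b, exp(w/2) reduces to the Gaussian pdf with covariance inv(b). The values here are made up.

import numpy as np
from scipy import stats

b = np.array([[2.0, 0.3], [0.3, 1.0]])   # precision matrix
m = np.zeros(2)
x = np.array([0.5, -0.2])
w = -np.log(2 * np.pi) * 2 + np.log(np.linalg.eigvalsh(b)).sum()
w -= (x - m) @ b @ (x - m)
print(np.isclose(np.exp(w / 2),
                 stats.multivariate_normal(m, np.linalg.inv(b)).pdf(x)))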
Example #17
def get_bands(h,output_file="BANDS2D_",nindex=[-1,1],
               nk=50,nsuper=1,reciprocal=False,
               operator=None,k0=[0.,0.]):
  """ Calculate band structure"""
  if h.dimensionality!=2: raise  # continue if two dimensional
  hk_gen = h.get_hk_gen() # gets the function to generate h(k)
  kxs = np.linspace(-nsuper,nsuper,nk)+k0[0]  # generate kx
  kys = np.linspace(-nsuper,nsuper,nk)+k0[1]  # generate ky
  kdos = [] # empty list
  kxout = []
  kyout = []
  if reciprocal: R = h.geometry.get_k2K() # get matrix
  else:  R = np.matrix(np.identity(3)) # get identity
  # setup a reasonable value for delta
  # setup the operator
  if operator is None:
    operator = np.matrix(np.identity(h.intra.shape[0]))
  os.system("rm -f "+output_file+"*") # delete previous files
  fo = [open(output_file+"_"+str(i)+".OUT","w") for i in nindex] # files        
  for x in kxs:
    for y in kys:
      r = np.matrix([x,y,0.]).T # real space vectors
      k = np.array((R*r).T)[0] # change of basis
      hk = hk_gen(k) # get hamiltonian
      evals = lg.eigvalsh(hk) # eigenvalues
      epos = sorted(evals[evals>0]) # positive energies
      eneg = -np.array(sorted(np.abs(evals[evals<0]))) # negative energies
      for (i,j) in zip(nindex,range(len(nindex))): # loop over bands
        fo[j].write(str(x)+"     "+str(y)+"   ")
        if i>0: fo[j].write(str(epos[i-1])+"\n")
        if i<0: fo[j].write(str(eneg[abs(i)-1])+"\n")
  [f.close() for f in fo] # close file
Example #18
def disp_rel(kx_vec, ky_vec, m1=1):
    m, n = kx_vec.shape
    omega = np.zeros((4, m, n))
    for i in range(m):
        for j in range(n):
            kx = kx_vec[i,j]
            ky = ky_vec[i,j]

            km = 0.5*(sqrt(3)*ky - kx)
            kp = 0.5*(sqrt(3)*ky + kx)
            skx = sin(kx)
            skp = sin(kp)
            skm = sin(km)
            ckx = cos(kx)
            ckp = cos(kp)
            ckm = cos(km)
            sq3 = sqrt(3)

            K = np.array([
                [4, 0, 2*1j*skx - 1j*skp + 1j*skm - 2*ckx - ckp - ckm, 0],
                [0, 2*sq3, 0, -sq3*1j*skp + sq3*1j*skm - sq3*ckp - sq3*ckm],
                [-2*1j*skx + 1j*skp-1j*skm - 2*ckx - ckp - ckm, 0, 4, 0],
                [0, sq3*1j*skp - sq3*1j*skm - sq3*ckp - sq3*ckm, 0, 2*sq3]])



            M = np.array([[1., 0., 0., 0],
                          [0., 1., 0., 0],
                          [0., 0., m1, 0],
                          [0., 0., 0., m1]])
            vals = LA.eigvalsh(K, M)
            omega[:,i,j] = vals

    return omega
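Usage sketch on a hypothetical k-grid (assumes the numpy/scipy names used inside disp_rel are in scope, as in the snippet):

import numpy as np

kx, ky = np.meshgrid(np.linspace(-np.pi, np.pi, 50),
                     np.linspace(-np.pi, np.pi, 50))
omega = disp_rel(kx, ky, m1=2.0)
print(omega.shape)   # (4, 50, 50): four branches on the 50x50 grid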
Example #19
def plot_mohr3d(S):
    r"""Plot 3D Mohr circles."""
    
    S3, S2, S1 = eigvalsh(S)
 
    R_maj = 0.5*(S1 - S3)
    cent_maj = 0.5*(S1+S3)
    
    R_min = 0.5*(S2 - S3)
    cent_min = 0.5*(S2 + S3)
    
    R_mid = 0.5*(S1 - S2)
    cent_mid = 0.5*(S1 + S2)
    
    circ1 = plt.Circle((cent_maj,0), R_maj, facecolor='#cce885', lw=3,
                       edgecolor='#5c8037')
    circ2 = plt.Circle((cent_min,0), R_min, facecolor='w', lw=3,
                       edgecolor='#15a1bd')
    circ3 = plt.Circle((cent_mid,0), R_mid, facecolor='w', lw=3,
                       edgecolor='#e4612d')
    plt.axis('image')
    ax = plt.gca()
    ax.add_artist(circ1)
    ax.add_artist(circ2)
    ax.add_artist(circ3)
    ax.set_xlim(S3 - .1*R_maj, S1 + .1*R_maj)
    ax.set_ylim(-1.1*R_maj, 1.1*R_maj)
    plt.xlabel(r"$\sigma$", size=18)
    plt.ylabel(r"$\tau$", size=18)
    #plt.savefig('Mohr_circle_3D.svg')
    plt.show()
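Usage sketch with a made-up symmetric stress tensor (assumes matplotlib.pyplot as plt and eigvalsh are in scope, as in the snippet):

import numpy as np

S = np.array([[40., 10.,  0.],
              [10., 25.,  0.],
              [ 0.,  0., 10.]])
plot_mohr3d(S)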
Example #20
def call_sdp(c, Fx_list, F0_list):
    '''
    Solve the SDP which minimizes $c^T x$ under the constraint
    $\sum_i Fx_i x_i - F0 \ge 0$ for all (Fx, F0) in (Fx_list, F0_list).
    '''

    # Alternatively, the SDPA library can be used, but this requires
    # interfacing to C libraries.
    #xvec = sdpa.run_sdpa(c, Fx_list, F0_list).

    for (k, (F0, Fx)) in enumerate(zip(F0_list, Fx_list)):
        assert linalg.norm(F0 - F0.conj().T) < 1e-10
        for i in range(Fx.shape[2]):
            assert linalg.norm(Fx[:,:,i] - Fx[:,:,i].conj().T) < 1e-10

    (Fx_list, F0_list) = make_F_real(Fx_list, F0_list)
    Gs = [cvxopt.base.matrix(Fx.reshape(Fx.shape[0]**2, Fx.shape[2])) for Fx in Fx_list]
    hs = [cvxopt.base.matrix(F0) for F0 in F0_list]

    sol = cvxopt.solvers.sdp(cvxopt.base.matrix(c), Gs=Gs, hs=hs)
    xvec = np.array(sol['x']).flatten()

    sol['Gs'] = Gs
    sol['hs'] = hs

    if sol['status'] == 'optimal':
        for (G, h) in zip(Gs, hs):
            G = np.array(G)
            h = np.array(h)
            M = np.dot(G, xvec).reshape(h.shape)
            assert linalg.eigvalsh(h-M)[0] > -1e-7

    return (xvec, sol)
Example #21
File: expr.py Project: c-l/sco
    def convexify(self, x, degree=1):
        """
        Returns an Expression object that represents the convex approximation of
        self at x where degree 1 is an affine approximation and degree 2 is a
        quadratic approximation. If the hessian has negative eigenvalues, the
        hessian is adjusted so that it is positive semi-definite.
        """

        res = None
        if degree == 1:
            A = self.grad(x)
            b = - A.dot(x) + self.eval(x)
            res = AffExpr(A, b)
        elif degree == 2:
            hess = self.hess(x)
            eig_vals = eigvalsh(hess)
            min_eig_val = min(eig_vals)
            if min_eig_val < 0:
                print("    negative hessian detected. adjusting by {0}.".format(-min_eig_val))
                hess = hess - np.eye(hess.shape[0])*min_eig_val
            grad = self.grad(x)
            Q = hess
            A = grad - np.transpose(x).dot(hess)
            b = 0.5*np.transpose(x).dot(hess).dot(x) - grad.dot(x) + self.eval(x)
            res = QuadExpr(Q, A, b)
        else:
            raise NotImplementedError
        return res
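The PSD adjustment used in the degree-2 branch can be checked in isolation: shifting the Hessian by its most negative eigenvalue moves the smallest eigenvalue to (numerically) zero.

import numpy as np
from scipy.linalg import eigvalsh

hess = np.array([[1.0, 2.0], [2.0, -3.0]])   # indefinite example
min_eig_val = eigvalsh(hess).min()
adjusted = hess - np.eye(2) * min_eig_val
print(eigvalsh(adjusted).min() >= -1e-12)    # True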
Example #22
 def is_physicial(rho):
     """
     Check the physicality of the density matrix rho.
     """
     p = linalg.eigvalsh(rho)
     # warn if rho has significantly negative eigenvalues or complex populations
     if not np.allclose(p[p < 0], 0) or not np.allclose(rho.diagonal().imag, 0):
         print("WARNING: Obtained Gibbs density matrix is not a positive semidefinite matrix")
Example #23
def laplacian_spectrum(G, weight='weight'):
    """Return eigenvalues of the Laplacian of G

    Parameters
    ----------
    G : graph
       A NetworkX graph

    weight : string or None, optional (default='weight')
       The edge data key used to compute each value in the matrix.
       If None, then each edge has weight 1.

    Returns
    -------
    evals : NumPy array
      Eigenvalues

    Notes
    -----
    For MultiGraph/MultiDiGraph, the edges weights are summed.
    See to_numpy_matrix for other options.

    See Also
    --------
    laplacian_matrix
    """
    from scipy.linalg import eigvalsh
    return eigvalsh(nx.laplacian_matrix(G, weight=weight).todense())
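Usage sketch (assumes networkx): the smallest eigenvalue of any graph Laplacian is 0.

import networkx as nx

evals = laplacian_spectrum(nx.path_graph(5))
print(evals[0])   # ~0 up to rounding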
Example #24
def dos2d_ewindow(h,energies=np.linspace(-1.,1.,30),delta=None,info=False,
                    use_green=True,nk=300,mode="adaptive"):
  """Calculate the density of states in certain eenrgy window"""
  ys = [] # density of states
  if delta is None: # pick a good delta value
    delta = 0.1*(max(energies) - min(energies))/len(energies)
  if use_green:
    from green import bloch_selfenergy
    for energy in energies:
      (g,selfe) = bloch_selfenergy(h,nk=nk,energy=energy, delta=delta,
                   mode=mode)
      ys.append(-g.trace()[0,0].imag)
      if info: print("Done",energy)
    write_dos(energies,ys) # write in file
    return
  else: # do not use green function    
    from dosf90 import calculate_dos # import fortran library
    import scipy.linalg as lg
    kxs = np.linspace(0.,1.,nk)
    kys = np.linspace(0.,1.,nk)
    hkgen= h.get_hk_gen() # get hamiltonian generator
    ys = energies*0.
    weight = 1./(nk*nk)
    for ix in kxs:
      for iy in kys:
        k = np.array([ix,iy,0.]) # create kpoint
        hk = hkgen(k) # get hk hamiltonian
        evals = lg.eigvalsh(hk) # get eigenvalues
        ys += weight*calculate_dos(evals,energies,delta) # add this contribution
      if info: print("Done",ix)
    write_dos(energies,ys) # write in file
    return
Example #25
def electronicEnergy(bonds, pa):
    """Compute the electronic contribution to the ground state energy of a
    polyene.

    Input arguments:
        bonds: a tuple of length equal to the number of carbon atoms in the
            polyene, containing the lengths of the bonds measured relative
            to their equilibrium lengths (i.e., bond[i] = u_{i+2} - u_{i+1}).
        pa: a dictionary containing the values of the system parameters.  These
        parameters must include,
            t0: the "bare" hopping (at equilibrium bond length), in eV
            alpha: the electron-phonon coupling, in units of eV per Angstrom
            chainlength: the number of carbon atoms in the polyene
            boundary: the type of boundary conditions ('periodic' or 'open')

    Returns: the ground state electronic energy of the specified polyene.
            
    Note that the length of the bonds tuple should be equal to the number of
    atoms even in the 'open' boundary condition case, in which the number of
    bonds in the molecule is one less than the number of atoms.  In this last
    case, the last entry of the bonds tuple should be set to 0."""
    
    if not set(['t0','alpha','chainlength','boundary']).issubset(set(pa.keys())):
        raise MissingParameters(set(pa.keys()))
    if len(bonds) != pa['chainlength']:
        raise BondNumberNotEqualChainLength(len(bonds), pa['chainlength'])
    if pa['boundary'] == 'open' and bonds[-1] != 0:
        raise OpenBCImplyLastBondAbsent(bonds[-1])
    energies = eigvalsh(hamiltonianMatrix(bonds, pa))
    return groundStateEnergy(energies)
Example #26
 def check_RCF(self):
     """Tests for right canonical form.
     
     Uses the criteria listed in sub-section 3.1, theorem 1 of arXiv:quant-ph/0608197v2.
     
     This is a consistency check mainly intended for debugging purposes.
     
     FIXME: The tolerances appear to be too tight!
     
     Returns
     -------
     (rnsOK, ls_trOK, ls_pos, ls_diag, normOK) : tuple of bool
         rnsOK: Right orthonormalization is fulfilled (self.r[n] = eye)
         ls_trOK: all self.l[n] have trace 1
         ls_pos: all self.l[n] are positive-definite
         ls_diag: all self.l[n] are diagonal
         normOK: the state is normalized
     """
     rnsOK = True
     ls_trOK = True
     ls_herm = True
     ls_pos = True
     ls_diag = True
     
     for n in xrange(1, self.N + 1):
         rnsOK = rnsOK and sp.allclose(self.r[n], sp.eye(self.r[n].shape[0]), atol=self.eps*2, rtol=0)
         ls_herm = ls_herm and sp.allclose(self.l[n] - m.H(self.l[n]), 0, atol=self.eps*2)
         ls_trOK = ls_trOK and sp.allclose(sp.trace(self.l[n]), 1, atol=self.eps*1000, rtol=0)
         ls_pos = ls_pos and all(la.eigvalsh(self.l[n]) > 0)
         ls_diag = ls_diag and sp.allclose(self.l[n], sp.diag(self.l[n].diagonal()))
     
     normOK = sp.allclose(self.l[self.N], 1., atol=self.eps*1000, rtol=0)
     
     return (rnsOK, ls_trOK, ls_pos, ls_diag, normOK)
Example #27
 def f(self):
     self.find_nearest_neighbours()
     self.assign_start_indexes_to_atoms()
     with open(os.path.join(self.output_path, 'k_points'), 'w') as f:
         f.write('\n'.join(' '.join(map(str, k)) for k in self.k_mesh))
     with open(os.path.join(self.output_path, 'energies'), 'w') \
             as output_f:
         for k in self.k_mesh:
             self.H = zeros((self.H_matrix_dim,
                             self.H_matrix_dim),
                            dtype=complex_)
             for atom_idx, atom in enumerate(self.atoms):
                 atom.count_diagonal_matrix_elements(
                     self.parameters,
                     self.H,
                     mult=self.spin_multiplier)
                 for neighbour_atom_idx, r in self.nn_dict[atom_idx]:
                     neighbour_atom = self.atoms[neighbour_atom_idx]
                     atom.count_hamiltonian_matrix_elements(
                         neighbour_atom,
                         r, k,
                         self.parameters,
                         self.H,
                         mult=self.spin_multiplier)
             # print self.H
             energies = eigvalsh(self.H)
             output_f.write(' '.join(map(str, energies)) + '\n')
Example #28
    def evaluate_eigenvalues_at(self, nodes, component=None, as_matrix=False):
        r"""
        Evaluate the eigenvalues :math:`\lambda_i\left(x\right)` at some grid nodes :math:`\gamma`.

        :param nodes: The grid nodes :math:`\gamma` we want to evaluate the eigenvalues at.
        :param component: The index :math:`i` of the eigenvalue :math:`\lambda_i` that gets evaluated.
        :param as_matrix: Returns the whole matrix :math:`\Lambda` instead of only a list with the eigenvalues :math:`\lambda_i`.
        :return: A sorted list with :math:`N` entries for all the eigenvalues evaluated at the nodes. Or a single value if a component was specified.
        """
        result = []

        # Hack to see if we evaluate at a single value
        if type(nodes) == numpy.ndarray:
            # Max to get rid of singular dimensions
            n = max(nodes.shape)
        else:
            try:
                n = len(nodes)
            except TypeError:
                n = len([nodes])

        # Memory for storing temporary values
        tmppot = numpy.ndarray((n,self.number_components,self.number_components), dtype=numpy.floating)
        tmpew = numpy.ndarray((n,self.number_components), dtype=numpy.floating)

        # evaluate potential
        values = self.evaluate_at(nodes)

        # fill in values
        for row in xrange(0, self.number_components):
            for col in xrange(0, self.number_components):
                tmppot[:, row, col] = values[row*self.number_components + col]

        # calculate eigenvalues assuming hermitian matrix (eigvalsh for stability!)
        for i in xrange(0, n):
            ew = linalg.eigvalsh(tmppot[i,:,:])
            # Sorting the eigenvalues biggest first.
            ew.sort()
            tmpew[i,:] = ew[::-1]

        tmp = [ tmpew[:,index] for index in xrange(0, self.number_components) ]

        if component is not None:
            (row, col) = component
            if row == col:
                result = tmp[row]
            else:
                result = numpy.zeros(tmp[row].shape, dtype=numpy.floating)
        elif as_matrix is True:
            result = []
            for row in xrange(self.number_components):
                for col in xrange(self.number_components):
                    if row == col:
                        result.append(tmp[row])
                    else:
                        result.append( numpy.zeros(tmp[row].shape, dtype=numpy.floating) )
        else:
            result = tmp

        return result
Example #29
def _h_gen_roots_and_weights(n, mu, factor, func):
    """Compute the roots and weights for Gaussian-Hermite quadrature.
    Internal function.
    """
    if n < 1:
        raise ValueError("n must be positive.")
    
    bn = np.sqrt(np.arange(1,n, dtype=np.float64)/factor)
    c = np.diag(bn, -1)
    x = linalg.eigvalsh(c, overwrite_a=True)

    # improve roots by one application of Newton's method
    dy = func(n, x)
    df = factor*n*func(n-1, x)
    x -= dy/df

    df /= df.max()
    w = 1 / (df * df)

    # symmetrize
    w = (w + w[::-1])/2
    x = (x - x[::-1])/2

    # scale w correctly
    w *= np.sqrt(2.0*np.pi/factor) / w.sum()

    if mu:
        return [x, w, mu]
    else:
        return x, w
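A hypothetical call mirroring how this helper can be wired up for the probabilists' Hermite polynomials (factor=1, func=scipy.special.eval_hermitenorm); after the final scaling the weights sum to sqrt(2*pi/factor).

import numpy as np
from scipy import special

x, w = _h_gen_roots_and_weights(5, mu=False, factor=1.0,
                                func=special.eval_hermitenorm)
print(np.isclose(w.sum(), np.sqrt(2 * np.pi)))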
Example #30
def bf_eigen_windows(test_dict, gen_dict, phepos_dict, OUT_fh, input_header, var_thresh, window):
	'''
	Function to process dict of SNP-gene tests.
	Calculates the genotype correlation matrix for the SNPs tested for each gene using windows around the gene.
	Will calculate a regularized correlation matrix from the Ledoit-Wolf estimator of the covariance matrix.
	Finds the eigenvalues for this matrix.
	Calculates how many eigenvalues are needed to reach the variance threshold.
	This final value will be the effective Bonferroni correction number.
	Will output to file corrected pvalue for best SNP per gene as it goes along.
	'''
	OUT = open(OUT_fh, 'w')
	OUT.write(input_header + '\tBF\tTESTS\n')

	counter = 1.0
	genes = test_dict.keys()
	numgenes = len(genes)
	TSSs = []
	for gene in genes:
		TSSs.append(phepos_dict[gene][0])
	zipped = zip(TSSs, genes)
	zipped.sort()
	TSSs, genes = zip(*zipped)
	for gene in genes:
		perc = (100 * counter / numgenes)
		if (counter % 100) == 0:
			print str(counter) + ' out of ' + str(numgenes) + ' completed ' + '(' + str(round(perc, 3)) + '%)' 

		sys.stdout.flush()
		counter += 1
		snps = np.sort(test_dict[gene]['snps'])
		start = 0
		stop = window
		M = len(snps)
		m_eff = 0
		window_counter = 0
		while start < M:
			if stop - start == 1:
				m_eff += 1
				break ##can't compute eigenvalues for a scalar, so add 1 to m_eff and break from the while loop
			snps_window = snps[start:stop]
			genotypes = []
			for snp in snps_window:
				if snp in gen_dict:
					genotypes.append(gen_dict[snp])
			genotypes = np.matrix(genotypes)
			m, n = np.shape(genotypes)
			gen_corr, alpha = lw_shrink(genotypes)
			window_counter += 1
			eigenvalues = splin.eigvalsh(gen_corr)
			eigenvalues[eigenvalues < 0] = 0
			m_eff += find_num_eigs(eigenvalues, m, var_thresh)
			start += window
			stop += window
			if stop > M:
				stop = M
		OUT.write(test_dict[gene]['line'] + '\t' + str(min(test_dict[gene]['pval'] * m_eff, 1)) + '\t' + str(m_eff) + '\n')
		OUT.flush()
		gc.collect()
	OUT.close()
Example #31
    def sdca(self, Kin, Kout, y, coefs_init):
        n_samples = Kin.shape[0]
        n_dim = Kout.shape[0]

        # For block descent, step size depends on max eigen value of Kout
        # Same as np.linalg.eigvalsh(Kout)[-1]
        Kout_lambda_max = eigvalsh(Kout, eigvals=(n_dim - 1, n_dim - 1))[0]

        # Data
        dsin = get_dataset(Kin, order="c")
        dsout = get_dataset(Kout, order="c")

        # Initialization
        # Used if done in fit
        self.coefs = np.zeros(n_dim*n_samples, dtype=np.float64) if \
           self.coefs_init is None else coefs_init
        # What is below was relegated to fit
        # if self.coefs_init is None:
        #     self.coefs = np.zeros(n_dim*n_samples, dtype=np.float64)
        # elif isinstance(self.coefs_init, str) and self.coefs_init.lower() == "svr":
        #     # Estimate condition median
        #     svr = SVR(C=self.C/2, kernel="precomputed", epsilon=self.eps)
        #     svr.fit(Kin, y)
        #     svr_dual = np.zeros(y.shape)
        #     svr_dual[svr.support_] = svr.dual_coef_[0, :]
        #     self.coefs = np.kron(svr_dual, np.ones(n_dim))
        # else:
        #     self.coefs = self.coefs_init.T.ravel()

        # Array for objective values
        #        inner_obj = np.ones(self.max_iter)

        # Some Parameters
        n_calls = n_samples if self.n_calls is None else self.n_calls
        rng = check_random_state(self.random_state)
        status = np.zeros(1, dtype=np.int16)

        # Call to the solver
        self.time = time.process_time()  # Store beginning time
        _prox_sdca_intercept_fit(self, dsin, dsout, y, self.coefs, self.alpha,
                                 self.C, self.eps, self.stepsize_factor,
                                 self.probs, self.max_iter, self.tol,
                                 self.callback, n_calls, self.max_time,
                                 self.n_gap, self.gap_time_ratio, self.verbose,
                                 rng, status, self.active_set, Kout_lambda_max)
        #                                , inner_obj)
        self.time = time.process_time() - self.time  # Store training time

        # Set coefs
        self.coefs = np.reshape(self.coefs, (n_samples, n_dim)).T

        # Save inner objective values
        #        self.inner_obj = inner_obj[inner_obj < 0]

        # Resolution status
        if status[0] == 1:
            self.status = "Optimal solution found"
        elif status[0] == 2:
            self.status = "Maximum iteration reached"
        elif status[0] == 3:
            self.status = "Maximum time reached"
        else:
            self.status = ""
Example #32
def _check_precision_matrix(precision, covariance_type):
    """Check a precision matrix is symmetric and positive-definite."""
    if not (np.allclose(precision, precision.T)
            and np.all(linalg.eigvalsh(precision) > 0.)):
        raise ValueError("'%s precision' should be symmetric, "
                         "positive-definite" % covariance_type)
Example #33
def test(n, data, check_stab=0):
    '''Return solution'''
    omega = UnitSquareMesh(n, n)
    boundaries = MeshFunction('size_t', omega, 1, 0)
    DomainBoundary().mark(boundaries, 1)
    # Dirichlet Domain
    CompiledSubDomain('near(x[1]*(1-x[1]), 0)').mark(boundaries, 1)
    # Neumann
    CompiledSubDomain('near(x[0], 1)').mark(boundaries, 2)
    # LM domain
    CompiledSubDomain('near(x[0], 0)').mark(boundaries, 3)
    gamma = EmbeddedMesh(boundaries, 3)

    V = VectorFunctionSpace(omega, 'CG', 2)
    Q = FunctionSpace(omega, 'CG', 1)
    # This should be a stable triplet: DG0, P1, P2
    Y = FunctionSpace(gamma, 'DG', 0)
    W = [V, Q, Y]

    u, p, x = list(map(TrialFunction, W))
    v, q, y = list(map(TestFunction, W))

    Tu, Tv = Trace(u, gamma), Trace(v, gamma)
    # Normal and volume measure of LM
    n = Constant((-1, 0))
    tau = Constant((0, 1))  # Tangent
    dx_ = Measure('dx', domain=gamma)
    # For Neumann term
    ds_ = Measure('ds', domain=omega, subdomain_data=boundaries)

    a = block_form(W, 2)
    a[0][0] = inner(grad(u), grad(v)) * dx + inner(dot(Tu, tau), dot(
        Tv, tau)) * dx_
    a[0][1] = inner(-p, div(v)) * dx
    a[0][2] = inner(dot(Tv, n), x) * dx_
    a[1][0] = inner(-q, div(u)) * dx
    a[2][0] = inner(dot(Tu, n), y) * dx_

    L = block_form(W, 1)
    L[0] = inner(data['f'], v) * dx + inner(dot(Tv, tau), data['g']) * dx_
    # Neumann bit
    L[0] += inner(data['h'], v) * ds_(2)
    L[2] = inner(dot(data['u'], n), y) * dx

    if Y.ufl_element().family() == 'Discontinuous Lagrange':
        assert Y.ufl_element().degree() == 0
        Y_bcs = []
    else:
        # NOTE: these are necessary to obtain a non-singular problem.
        # However, the boundary values for the LM are in general not part of the
        # problem data, so these BCs hurt convergence
        Y_bcs = [DirichletBC(Y, Constant(0), 'on_boundary')]

    W_bcs = [[DirichletBC(V, data['u'], boundaries, 1)], [], Y_bcs]

    A, b = list(map(ii_assemble, (a, L)))
    A, b = apply_bc(A, b, W_bcs)

    # Check inf-sup stability
    # We do it directly so this can get costly
    if check_stab and sum(Wi.dim() for Wi in W) < 8000:
        # Get the norms for stable problem
        B00 = A[0][0]
        B11 = assemble(inner(p, q) * dx)

        from hsmg.hseig import Hs0Norm
        from scipy.linalg import eigvalsh

        if Y_bcs:
            B22 = Hs0Norm(Y, s=-0.5, bcs=Y_bcs)
        else:
            B22 = Hs0Norm(Y, s=-0.5, bcs=True)
        B22 * (B22.create_vec())
        B22 = B22.matrix

        B = block_diag_mat([B00, B11, B22])

        A, B = list(map(ii_convert, (A, B)))
        print(RED % ('Solving for %d eigenvalues' % A.size(0)))
        lmin, lmax = np.sort(np.abs(eigvalsh(A.array(), B.array())))[[0, -1]]
        print(GREEN % ('lmin = %g, lmax = %g, cond = %g' %
                       (lmin, lmax, lmax / lmin)))

    wh = ii_Function(W)
    solve(ii_convert(A), wh.vector(), ii_convert(b))

    return omega.hmin(), wh
Example #34
def traceMinimum(f,
                 d2f_dxdt,
                 d2f_dx2,
                 x0,
                 t0,
                 tstop,
                 dtstart,
                 deltaX_target,
                 dtabsMax=20.0,
                 dtfracMax=.25,
                 dtmin=1e-3,
                 deltaX_tol=1.2,
                 minratio=1e-2):
    """
    Trace the minimum `xmin(t)` of the function `f(x,t)`, starting at `x0, t0`.

    Parameters
    ----------
    f : callable
        The scalar function `f(x,t)` which needs to be minimized. The input will
        be of the same type as `(x0,t0)`.
    d2f_dxdt, d2f_dx2 : callable
        Functions which return derivatives of `f(x)`. `d2f_dxdt` should
        return the derivative of the gradient of `f(x)` with respect to `t`, and
        `d2f_dx2` should return the Hessian matrix of `f(x)` evaluated at `t`.
        Both should take as inputs `(x,t)`.
    x0 : array_like
        The initial starting point. Must be an array even if the potential is
        one-dimensional (in which case the array should have length 1).
    t0 : float
        The initial starting parameter `t`.
    tstop : float
        Stop the trace when `t` reaches `tstop`.
    dtstart : float
        Initial stepsize.
    deltaX_target : float
        The target error in x at each step. Determines the
        stepsize in t by extrapolation from last error.
    dtabsMax : float, optional
    dtfracMax : float, optional
        The largest stepsize in t will be the LARGEST of
        ``abs(dtstart)*dtabsMax`` and ``t*dtfracMax``.
    dtmin : float, optional
        The smallest stepsize we'll allow before assuming the transition ends,
        relative to `dtstart`
    deltaX_tol : float, optional
        ``deltaX_tol*deltaX_target`` gives the maximum error in x
        before we want to shrink the stepsize and recalculate the minimum.
    minratio : float, optional
        The smallest ratio between smallest and largest eigenvalues in the
        Hessian matrix before treating the smallest eigenvalue as zero (and
        thus signaling a saddle point and the end of the minimum).

    Returns
    -------
      X, T, dXdT : array_like
        Arrays of the minimum at different values of t, and
        its derivative with respect to t.
      overX : array_like
        The point beyond which the phase seems to disappear.
      overT : float
        The t-value beyond which the phase seems to disappear.

    Notes
    -----
    In prior versions, `d2f_dx2` was optional and called `d2f`, while `d2f_dxdt`
    was calculated from an optional parameter `df` using finite differences. If
    neither of these was supplied, they would be calculated directly from
    `f(x,t)` using finite differences. This led to a messier calling signature,
    since additional parameters were needed to find the finite differences. By
    instead requiring that the derivatives be supplied, the task of creating the
    derivative functions can be delegated to more general purpose routines
    (see e.g. :class:`helper_functions.gradientFunction` and
    :class:`helper_functions.hessianFunction`).

    Also new in this version, `dtmin` and `dtabsMax` are now relative to
    `dtstart`. The idea here is that there should be some required parameter
    that sets the scale, and then optional parameters can set the tolerances
    relative to this scale. `deltaX_target` is now not optional for the same
    reasoning.
    """
    print("traceMinimum t0 = %0.6g" % t0)
    Ndim = len(x0)
    M0 = d2f_dx2(x0, t0)
    minratio *= min(abs(linalg.eigvalsh(M0))) / max(abs(linalg.eigvalsh(M0)))

    def dxmindt(x, t):
        M = d2f_dx2(x, t)
        if abs(linalg.det(M)) < (1e-3 * np.max(abs(M)))**Ndim:
            # Assume matrix is singular
            return None, False
        b = -d2f_dxdt(x, t)
        eigs = linalg.eigvalsh(M)
        try:
            dxdt = linalg.solve(M, b, overwrite_a=False, overwrite_b=False)
            # dxdt = linalg.solve(M,b, overwrite_a=True, overwrite_b=True)
            isneg = ((eigs <= 0).any() or min(eigs) / max(eigs) < minratio)
        except:
            dxdt = None
            isneg = False
        return dxdt, isneg

    xeps = deltaX_target * 1e-2

    def fmin(x, t):
        return optimize.fmin(f,
                             x,
                             args=(t, ),
                             xtol=xeps,
                             ftol=np.inf,
                             disp=False)

    deltaX_tol = deltaX_tol * deltaX_target
    tscale = abs(dtstart)
    dtabsMax = dtabsMax * tscale
    dtmin = dtmin * tscale

    x, t, dt, xerr = x0, t0, dtstart, 0.0
    dxdt, negeig = dxmindt(x, t)
    X, T, dXdT = [x], [t], [dxdt]
    overX = overT = None
    while dxdt is not None:
        sys.stdout.write('.')
        sys.stdout.flush()
        # Get the values at the next step
        tnext = t + dt
        xnext = fmin(x + dxdt * dt, tnext)
        dxdt_next, negeig = dxmindt(xnext, tnext)
        if dxdt_next is None or negeig == True:
            # We got stuck on a saddle, so there must be a phase transition
            # there.
            dt *= .5
            overX, overT = xnext, tnext
        else:
            # The step might still be too big if it's outside of our error
            # tolerance.
            xerr = max(np.sum((x + dxdt * dt - xnext)**2),
                       np.sum((xnext - dxdt_next * dt - x)**2))**.5
            if xerr < deltaX_tol:  # Normal step, error is small
                T.append(tnext)
                X.append(xnext)
                dXdT.append(dxdt_next)
                if overT is None:
                    # change the stepsize only if the last step wasn't
                    # troublesome
                    dt *= deltaX_target / (xerr + 1e-100)
                x, t, dxdt = xnext, tnext, dxdt_next
                overX = overT = None
            else:
                # Either stepsize was too big, or we hit a transition.
                # Just cut the step in half.
                dt *= .5
                overX, overT = xnext, tnext
        # Now do some checks on dt.
        if abs(dt) < abs(dtmin):
            # Found a transition! Or at least a point where the step is really
            # small.
            break
        if dt > 0 and t >= tstop or dt < 0 and t <= tstop:
            # Reached tstop, but we want to make sure we stop right at tstop.
            dt = tstop - t
            x = fmin(x + dxdt * dt, tstop)
            dxdt, negeig = dxmindt(x, tstop)
            t = tstop
            X[-1], T[-1], dXdT[-1] = x, t, dxdt
            break
        dtmax = max(t * dtfracMax, dtabsMax)
        if abs(dt) > dtmax:
            dt = np.sign(dt) * dtmax
    if overT is None:
        overX, overT = X[-1], T[-1]
    sys.stdout.write('\n')
    sys.stdout.flush()
    X = np.array(X)
    T = np.array(T)
    dXdT = np.array(dXdT)
    return _traceMinimum_rval(X, T, dXdT, overX, overT)
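A hypothetical 1-D check: for f(x, t) = (x[0] - t)**2 the minimum is x(t) = t, so the traced derivative dX/dT should be close to 1 everywhere (assumes the numpy/scipy names used inside traceMinimum are in scope, as in the snippet).

import numpy as np

f = lambda x, t: (x[0] - t)**2
d2f_dxdt = lambda x, t: np.array([-2.0])     # d/dt of grad_x f
d2f_dx2 = lambda x, t: np.array([[2.0]])     # Hessian of f in x
X, T, dXdT, overX, overT = traceMinimum(f, d2f_dxdt, d2f_dx2,
                                        x0=np.array([0.0]), t0=0.0,
                                        tstop=1.0, dtstart=0.1,
                                        deltaX_target=1e-3)
print(np.allclose(np.asarray(dXdT).ravel(), 1.0, atol=1e-2))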
Example #35
        A = invT.dot(Y).dot(np.transpose(invT))
        B = invT.dot(X).dot(np.transpose(invT))
        avgA = avgA + A
        avgB = avgB + B
        deltaB = B - np.eye(d)
        A_lst.append(A)
        B_lst.append(B)
        deltaB_lst.append(deltaB)

        A_tensor[j, i, :, :] = A
        B_tensor[j, i, :, :] = B

    avgA = avgA / n_samples
    avgB = avgB / n_samples

    w_avgA = eigvalsh(avgA)
    quant_1 = w_avgA[-1] - w_avgA[-2]
    print('quantity 1', quant_1)

    quant_2 = 0
    for i in range(n_samples):
        # w_A = eigvalsh(A_lst[i])
        l1A = eigvalsh(A_lst[i], eigvals=(d - 1, d - 1))[0]
        arr_2[j, i] = abs(l1A)
        quant_2 = quant_2 + pow(abs(l1A), 6)
    quant_2 = quant_2 / n_samples
    print('quantity 2', quant_2)

    quant_3 = 0
    for i in range(n_samples):
        norm_deltaB = norm(deltaB_lst[i], ord=2)
Ejemplo n.º 36
0
def distance_riemann(A, B):
    return np.sqrt((np.log(eigvalsh(A, B))**2).sum())
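Two sanity checks for this affine-invariant (generalized-eigenvalue) distance on SPD matrices: d(A, A) = 0, and d(A, B) = d(B, A) because the generalized eigenvalues of (B, A) are the reciprocals of those of (A, B).

import numpy as np
from scipy.linalg import eigvalsh

A = np.array([[2.0, 0.5],
              [0.5, 1.0]])
B = np.eye(2)
print(np.isclose(distance_riemann(A, A), 0.0))
print(np.isclose(distance_riemann(A, B), distance_riemann(B, A)))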
Example #37
def get_VBM(params):
	# The VBM is at the gamma point
	kx = 0
	ky = 0
	kz = 0
	# Make the parameters of the TB model
	es = params[0]
	ep = params[1]
	ed = params[2]
	ed_ = params[3]
	vss = params[4]
	vxx = params[5]
	vxy = params[6]
	vsp = params[7]
	vsdS = params[8]
	vpdS = params[9]
	vpdP = params[10]
	vddS = params[11]
	vddP = params[12]
	vddD = params[13]
	v15 = vsdS/(3.0**0.5)
	v18 = 0
	v19 = 0
	v25 = ((3**0.5)*vpdS + vpdP)/(3.0*(3.0**0.5))
	v26 = ((3**0.5)*vpdS - (2.0*vpdP))/(3.0*(3.0**0.5))
	v27 = v25
	v28 = vpdP/(3.0**0.5)
	v29 = -1.0*vpdP/3.0
	v38 = -1.0*v28
	v39 = v29
	v48 = 0.0
	v49 = -2.0*v29
	v55 = ((3.0*vddS)+(2.0*vddP)+(4.0*vddD))/9.0
	v56 = ((3.0*vddS)-vddP-(2.0*vddD))/9.0
	v57 = v56
	v58 = 0.0
	v78 = (vddP-vddD)/3.0
	v59 = -2.0*v78/(3.0**0.5)
	v68 = -1.0*v78
	v79 = (vddP-vddD)/(3.0*(3.0**0.5))
	v69 = v79
	v88 = 2.0*(vddP/3.0)+(vddD/3.0)
	v89 = 0.0
	v99 = v88

	# Get the phase factors for the TB model
	g = 0.25*np.array([cmath.rect(1,(np.pi/2)*(kx+ky+kz)) +cmath.rect(1,(np.pi/2)*(kx-ky-kz)) +cmath.rect(1,(np.pi/2)*(-kx+ky-kz)) +cmath.rect(1,(np.pi/2)*(-kx-ky+kz)),
					   cmath.rect(1,(np.pi/2)*(kx+ky+kz)) +cmath.rect(1,(np.pi/2)*(kx-ky-kz)) -cmath.rect(1,(np.pi/2)*(-kx+ky-kz)) -cmath.rect(1,(np.pi/2)*(-kx-ky+kz)),
					   cmath.rect(1,(np.pi/2)*(kx+ky+kz)) -cmath.rect(1,(np.pi/2)*(kx-ky-kz)) +cmath.rect(1,(np.pi/2)*(-kx+ky-kz)) -cmath.rect(1,(np.pi/2)*(-kx-ky+kz)),
					   cmath.rect(1,(np.pi/2)*(kx+ky+kz)) -cmath.rect(1,(np.pi/2)*(kx-ky-kz)) -cmath.rect(1,(np.pi/2)*(-kx+ky-kz)) +cmath.rect(1,(np.pi/2)*(-kx-ky+kz))],dtype=complex)

	# Make the on-site potential matrix
	self_ham = np.array([[es, 	0.0, 	0.0,  	0.0,	0.0,	0.0,	0.0,	0.0,	0.0],
						 [0.0,	ep, 	0.0,	0.0,	0.0,	0.0,	0.0,	0.0,	0.0],
						 [0.0,	0.0,	ep,	    0.0,	0.0,	0.0,	0.0,	0.0,	0.0],
						 [0.0,	0.0,	0.0,	ep,	    0.0,	0.0,	0.0,	0.0,	0.0],
						 [0.0,	0.0,	0.0,	0.0,	ed,	    0.0,	0.0,	0.0,	0.0],
						 [0.0,	0.0,	0.0,	0.0,	0.0,	ed,   	0.0,	0.0,	0.0],
						 [0.0,	0.0,	0.0,	0.0,	0.0,	0.0,	ed, 	0.0,	0.0],
						 [0.0,	0.0,	0.0,	0.0,	0.0,	0.0,	0.0,	ed_,	0.0],
						 [0.0,	0.0,	0.0,	0.0,	0.0,	0.0,	0.0,	0.0,	ed_]],dtype=complex)

	# Make the interaction potential matrix
	interact_ham = np.array([[vss*g[0], vsp*g[1], vsp*g[2], vsp*g[3], v15*g[3], v15*g[1], v15*g[2], v18*g[0], v19*g[0]],
							[-vsp*g[1], vxx*g[0], vxy*g[3], vxy*g[2], v25*g[2], v26*g[0], v27*g[3], v28*g[1], v29*g[1]],
							[-vsp*g[2], vxy*g[3], vxx*g[0], vxy*g[1], v27*g[1], v25*g[3], v26*g[0],-v28*g[2], v29*g[2]],
							[-vsp*g[3], vxy*g[2], vxy*g[1], vxx*g[0], v26*g[0], v27*g[2], v25*g[1], v48*g[3], v49*g[3]],
							[ v15*g[3],-v25*g[2],-v27*g[1],-v26*g[0], v55*g[0], v56*g[2], v56*g[1], v58*g[3], v59*g[3]],
							[ v15*g[1],-v26*g[0],-v25*g[3],-v27*g[2], v56*g[2], v55*g[0], v56*g[3], v68*g[1], v69*g[1]],
							[ v15*g[2],-v27*g[3],-v26*g[0],-v25*g[1], v56*g[1], v56*g[3], v55*g[0], v78*g[2], v79*g[2]],
							[ v18*g[0],-v28*g[1], v28*g[2],-v48*g[3], v58*g[3], v68*g[1], v78*g[2], v88*g[0], v89*g[0]],
							[ v19*g[0],-v29*g[1],-v29*g[2],-v49*g[3], v59*g[3], v69*g[1], v79*g[2], v89*g[0], v99*g[0]]],dtype=complex)
	
	# Get the complex conjugate of the interaction matrix
	conj_interact_ham = np.conj(interact_ham.T)

	# Make the Hamiltonian out of the on-site potential and the interaction
	num_atoms = 2
	hamiltonian = np.zeros((num_atoms,num_atoms,9,9),dtype=complex)
	for i in range(num_atoms):
		for j in range(num_atoms):
			if i == j:
				hamiltonian[i][j] = self_ham
			if i == j + 1:
				hamiltonian[i][j] = interact_ham
			if j == i + 1:
				hamiltonian[i][j] = conj_interact_ham

	hamiltonian = np.concatenate(hamiltonian,axis=2)
	hamiltonian = np.concatenate(hamiltonian,axis=0)
	es = LA.eigvalsh(hamiltonian)
	# The VBM is the 4th eigenvalue at the gamma point
	VBM = es[3]	
	return VBM
Example #38
def Entropy_qit(rho):
    rho_e = eigvalsh(rho)
    return -rho_e.dot(np.log2(rho_e))
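Sanity check (assumes numpy and eigvalsh in scope, as in the snippet): the maximally mixed qubit has one bit of von Neumann entropy. Note the formula breaks on exactly zero eigenvalues, where log2 diverges.

import numpy as np

rho = np.eye(2) / 2
print(np.isclose(Entropy_qit(rho), 1.0))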
Example #39
def pcaPlots(prob, kernels=['Linear'], numPC=2, d=0, mu=1, lams=None, linCon=True, covCon=True, predBnd=150, N=25, perc=0.5, split=False, norm=True):

    print('Fair PCA Parameters: numPC=%s, d=%s, mu=%s'%(numPC,d,mu))
    if split:
        prob.splitData(0.8)
        if norm: prob.train, means, stdevs = normalize(prob.train)
    elif norm: prob.data, means, stdevs = normalize(prob.data)
    if linCon and not covCon: B,m = prob.zpca(dimPCA=numPC,d=d,split=split)
    elif covCon: B,m = prob.spca(dimPCA=numPC,addLinear=linCon,mu=mu,d=d,split=split)
    else: B,m = prob.pca(dimPCA=numPC,split=split)
    eigVecs = la.eigh(m.m.dat.T.dot(m.m.dat))[1][:,-numPC:]
    totVar = np.trace(m.m.dat.T.dot(m.m.dat))
    varExp = np.trace(B.T.dot(m.m.dat.T).dot(m.m.dat.dot(B)))

    print(m.m.m.getprosta(mosek.soltype.itr))
    if m.m.m.getprosta(mosek.soltype.itr)!=mosek.prosta.prim_and_dual_feas: return
    print('Top eigenvalues of solution:',np.round(la.eigvalsh(m.m.X)[-numPC:],4))
    print('Correlation with top eigenvectors:',[round(la.norm(b,2),2) for b in B.T.dot(eigVecs)])
    print('Proportion of variance explained:', varExp/totVar)
    print('Proportion of deviation explained:', np.sqrt(varExp/totVar))
    if linCon or covCon:
        if np.max(np.abs((m.m.B.T.dot(m.getZCon(prob.trainSide if split else prob.sideResp).reshape((prob.numFields,1))))))>1e-7:
            print('Linear constraint unsatisfied')
        else:
            print('Linear constraint satisfied')
    
    
    if split:
        if norm: prob.test = normalize(((prob.test-means[:len(prob.test)])/stdevs[:len(prob.test)]+means[:len(prob.test)]).dot(B))[0]
        else: prob.test = normalize(prob.test.dot(B))[0]
    else: prob.data = normalize(prob.data.dot(B))[0]
    prob.numFields = 2
    kernelDat = []
    for kernel in kernels:
        print(kernel+' SVM:')
        if kernel=='Linear': w,svm,err,b = prob.svm(useSRSP=True,outputFlag=True,split=split,lams=lams)
        elif kernel=='Gaussian': w,svm,err,b = prob.svm(useSRSP=True,dual=True,kernel=lambda x,y: math.exp(-la.norm(x-y)**2/2),outputFlag=True,split=split,lams=lams)
        elif kernel=='Polynomial': w,svm,err,b = prob.svm(useSRSP=True,dual=True,conic=False,kernel=lambda x,y: (x.T.dot(y)+1)**2,outputFlag=True,split=split,lams=lams)
        else:
            print('\tIncorrect kernel name')
            continue
        
        if svm.dual:
            pred = svm.K.dot(svm.alpha.flatten()*(2*svm.rsp-1))
            err, b = maxROC(prob.testSide if split else prob.sideResp,pred=pred)
            if numPC>1:
                predIdx = np.argsort(np.abs(pred-b))
                threshPts = prob.test[predIdx[:predBnd]] if split else prob.data[predIdx[:predBnd]]
                #func = lambda x: svm.alpha.T.dot(svm.getK(x))
                #threshPts = np.array([newton_krylov(func,point) for point in threshPts])
                x = threshPts[:,0]
                y = threshPts[:,1]
                meanx = np.mean(x)
                meany = np.mean(y)
                x -= meanx
                y -= meany
                idx = np.argsort(np.arctan2(x,y))
                x = x[idx] + meanx
                y = y[idx] + meany
                x = np.hstack((x,x[:N]))
                y = np.hstack((y,y[:N]))
                cumsum = np.cumsum(np.insert(x, 0, 0)) 
                x = (cumsum[N:] - cumsum[:-N]) / float(N)
                cumsum = np.cumsum(np.insert(y, 0, 0))
                y = (cumsum[N:] - cumsum[:-N]) / float(N)
        else:
            err, b = maxROC(prob.testSide if split else prob.sideResp,svm,prob.test if split else prob.data)
            if numPC>1:
                dat = prob.test if split else prob.data
                x = np.sort(dat[:,0])[[int(0.02*len(dat)),int(0.98*len(dat))]]
                y = np.sort(dat[:,1])[[int(0.02*len(dat)),int(0.98*len(dat))]]
                x = 0.8*(x-np.mean(x))+np.mean(x)
                y = 0.8*(y-np.mean(y))+np.mean(y)
                if x[1]-x[0]>y[1]-y[0]:
                    x = [(b-svm.B[1]*y[0])/svm.B[0],(b-svm.B[1]*y[1])/svm.B[0]]
                    if max(x)-max(dat[:,0])>0.1*(max(dat[:,0])-min(dat[:,0])):
                        idx = np.argmax(x)
                        x[idx] = 1.1*max(dat[:,0])-0.1*min(dat[:,0])
                        y[idx] = (b-svm.B[0]*x[idx])/svm.B[1]
                    if min(dat[:,0])-min(x)>0.1*(max(dat[:,0])-min(dat[:,0])):
                        idx = np.argmin(x)
                        x[idx] = 1.1*min(dat[:,0])-0.1*max(dat[:,0])
                        y[idx] = (b-svm.B[0]*x[idx])/svm.B[1]
                else:
                    y = [(b-svm.B[0]*x[0])/svm.B[1],(b-svm.B[0]*x[1])/svm.B[1]]
                    if max(y)-max(dat[:,1])>0.1*(max(dat[:,1])-min(dat[:,1])):
                        idx = np.argmax(y)
                        y[idx] = 1.1*max(dat[:,1])-0.1*min(dat[:,1])
                        x[idx] = (b-svm.B[1]*y[idx])/svm.B[0]
                    if min(dat[:,1])-min(y)>0.1*(max(dat[:,1])-min(dat[:,1])):
                        idx = np.argmin(y)
                        y[idx] = 1.1*min(dat[:,1])-0.1*max(dat[:,1])
                        x[idx] = (b-svm.B[1]*y[idx])/svm.B[0]
        main,side,xthresh,ythresh = prob.plot(svm,perc=1,thresholds=[b],graph=False,split=split)
        kernelDat.append(kern(kernel,x,y,main,side,xthresh,ythresh))
    fig = graphs(prob.test if split else prob.data,prob.testSide if split else prob.sideResp,kernelDat,perc)
    # winsound.PlaySound("SystemExit", winsound.SND_ALIAS)
    return prob.test if split else prob.data, prob.testSide if split else prob.sideResp, B, kernelDat, w, fig
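The cumsum/insert lines inside the dual-SVM branch above implement an O(n) N-point running mean used to smooth the closed threshold curve; the same trick in isolation (a standalone sketch):

import numpy as np

def running_mean(a, N):
    """Mean of every window of N consecutive samples, via prefix sums."""
    cumsum = np.cumsum(np.insert(a, 0, 0.0))
    return (cumsum[N:] - cumsum[:-N]) / float(N)

print(running_mean(np.array([1.0, 2.0, 3.0, 4.0]), 2))  # [1.5 2.5 3.5]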
Ejemplo n.º 40
0
# In[50]:

evals, evecs = la.eig(A)

# In[51]:

evals

# In[52]:

evecs

# In[53]:

la.eigvalsh(A)

# ## Nonlinear equations

# ### Univariate

# In[54]:

x = np.linspace(-2, 2, 1000)

# four examples of nonlinear functions
f1 = x**2 - x - 1
f2 = x**3 - 3 * np.sin(x)
f3 = np.exp(x) - 2
f4 = 1 - x**2 + np.sin(50 / (1 + x**2))
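The notebook presumably solves these equations in the cells that follow (not shown here). As a hedged sketch, a bracketing solver such as scipy.optimize.brentq finds the root of f3(x) = exp(x) - 2, since f3 changes sign on [-2, 2]:

import numpy as np
from scipy import optimize

root = optimize.brentq(lambda x: np.exp(x) - 2, -2, 2)
print(root, np.log(2))  # both ~0.693147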
Ejemplo n.º 41
0
A = read_csv('{}/{}{:.0f}.dat'.format(arampath, prefix, last),
             sep=',').values

# [0]Index_x [1]Index_y [2,3,4]Undef_X,Y,Z inches [5,6,7]Def_X,Y,Z inches [8,9,10,11]DefGrad (11 12 21 22)
A = A[n.abs(A[:, 3]) < 0.2, :]  # Reduces the amount we'll have to plot

if not os.path.exists('box_limits.dat'):
    x = A[:, 2]
    y = A[:, 3]

    #Calculation for each facet of the Logarithmic cumulative plastic strain
    LEp = n.empty(len(x))
    for i in range(len(x)):
        F = A[i, 8:].reshape(2, 2)
        U = sqrtm((F.T).dot(F))  #Stretch Tensor
        eigU = eigvalsh(U)
        #logarithmic strain in the principal coordinate system
        LE = n.log(eigU)
        #Logarithmic Cumulative Plastic strain
        LEp[i] = (2 / 3 * (LE[0]**2 + LE[1]**2 + (-LE[0] - LE[1])**2))**0.5

    # linspace needs an integer sample count, hence floor division
    xspace = linspace(n.min(x), n.max(x), len(n.unique(A[:, 0])) // 2)
    yspace = linspace(n.min(y), n.max(y), len(n.unique(A[:, 1])) // 2)

    LEP = griddata(vstack((x, y)).T,
                   LEp, (xspace[None, :], yspace[:, None]),
                   method='linear')
    meanLEP = nanmean(LEP.flatten())
    stdLEP = nanstd(LEP.flatten())

    p.close('all')
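As a quick consistency check on the strain pipeline above (a sketch, not part of the original script): for a rotation-free deformation gradient the principal logarithmic strains are simply the logs of the stretches, so the eigvalsh-based computation can be verified in closed form.

import numpy as np
from scipy.linalg import sqrtm, eigvalsh

F = np.diag([1.2, 0.9])                  # pure stretch, no rotation
U = sqrtm((F.T).dot(F))                  # right stretch tensor; here U == F
LE = np.log(eigvalsh(U))                 # principal log strains: log 0.9, log 1.2
LEp = (2 / 3 * (LE[0]**2 + LE[1]**2 + (-LE[0] - LE[1])**2))**0.5
print(np.allclose(np.sort(LE), np.sort(np.log([0.9, 1.2]))), LEp)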
Ejemplo n.º 42
0
    def _check(self):
        super(GMMHMM, self)._check()

        if not hasattr(self, "n_features"):
            self.n_features = self.means_.shape[2]

        self._init_covar_priors()
        self._fix_priors_shape()

        # Checking covariance type
        if self.covariance_type not in COVARIANCE_TYPES:
            raise ValueError(
                "covariance_type must be one of {0}".format(COVARIANCE_TYPES))

        self.weights_ = np.array(self.weights_)
        # Checking mixture weights' shape
        if self.weights_.shape != (self.n_components, self.n_mix):
            raise ValueError("mixture weights must have shape "
                             "(n_components, n_mix), "
                             "actual shape: {0}".format(self.weights_.shape))

        # Checking mixture weights' mathematical correctness
        if not np.allclose(np.sum(self.weights_, axis=1),
                           np.ones(self.n_components)):
            raise ValueError("mixture weights must sum up to 1")

        # Checking means' shape
        self.means_ = np.array(self.means_)
        if self.means_.shape != (self.n_components, self.n_mix,
                                 self.n_features):
            raise ValueError("mixture means must have shape "
                             "(n_components, n_mix, n_features), "
                             "actual shape: {0}".format(self.means_.shape))

        # Checking covariances' shape
        self.covars_ = np.array(self.covars_)
        covars_shape = self.covars_.shape
        needed_shapes = {
            "spherical": (self.n_components, self.n_mix),
            "tied": (self.n_components, self.n_features, self.n_features),
            "diag": (self.n_components, self.n_mix, self.n_features),
            "full":
            (self.n_components, self.n_mix, self.n_features, self.n_features)
        }
        needed_shape = needed_shapes[self.covariance_type]
        if covars_shape != needed_shape:
            raise ValueError("{0!r} mixture covars must have shape {1}, "
                             "actual shape: {2}".format(
                                 self.covariance_type, needed_shape,
                                 covars_shape))

        # Checking covariances' mathematical correctness
        from scipy import linalg

        if (self.covariance_type == "spherical"
                or self.covariance_type == "diag"):
            if np.any(self.covars_ <= 0):
                raise ValueError(
                    "{!r} mixture covars must be positive".format(
                        self.covariance_type))
        elif self.covariance_type == "tied":
            for i, covar in enumerate(self.covars_):
                if (not np.allclose(covar, covar.T)
                        or np.any(linalg.eigvalsh(covar) <= 0)):
                    raise ValueError("'tied' mixture covars must be "
                                     "symmetric, positive-definite")
        elif self.covariance_type == "full":
            for i, mix_covars in enumerate(self.covars_):
                for j, covar in enumerate(mix_covars):
                    if (not np.allclose(covar, covar.T)
                            or np.any(linalg.eigvalsh(covar) <= 0)):
                        raise ValueError(
                            "'full' covariance matrix of "
                            "mixture {0} of component {1} must be "
                            "symmetric, positive-definite".format(j, i))
Ejemplo n.º 43
0
    def _estimate_driven_gain(self, regul=0.):
        """Estimate the gain expressed over the basis by maximizing
        the likelihood, through a Newton-Raphson procedure

        The residual e(t) is modelled as a white noise with standard
        deviation driven by the driver x:
            std(e(t)) = exp(b(0) + b(1)x(t) + ... + b(m)x(t)^m)

        iter_gain : maximum number of iterations
        eps_gain  : stop iteration when corrections get smaller than eps_gain
        regul     : regularization factor (for inversion of the Hessian)
        """
        iter_gain = self.iter_gain
        eps_gain = self.eps_gain
        ordar_ = self.ordar_

        # -------- get the training data
        basis, residual, weights = self._get_train_data(
            [self.basis_, self.residual_])

        residual += EPSILON

        # -------- crop the ordar first values
        residual = residual[:, ordar_:]
        basis = basis[:, :, ordar_:]
        if weights is not None:
            weights = weights[:, ordar_:]

        # concatenate the epochs since it does not change the computations
        # as in estimating the AR coefficients
        residual = residual.reshape(1, -1)
        basis = basis.reshape(basis.shape[0], -1)
        if weights is not None:
            weights = weights.reshape(1, -1)

        residual2 = residual**2
        n_points = residual.size
        self.G_ = np.zeros((1, self.n_basis))

        # -------- prepare an initial estimation
        # chose N = 3 * ordriv_ classes, estimate a standard deviation
        # on each class, and make a regression on the values of
        # the indexes for their centroid.
        # For a time-dependent signal, a good idea was to take
        # successive slices of the signal, but for signal driven
        # models, this would be wrong! Use levels of the driving
        # function instead.
        if self.n_basis > 1:
            index = np.argsort(basis[1, :])  # indexes to sort the basis
        else:
            index = np.arange(n_points, dtype=int)

        nbslices = 3 * self.n_basis  # number of slices
        lenslice = n_points // nbslices  # length of a slice
        e = np.zeros(nbslices)  # log energies

        # -------- prepare least-squares equations
        tmp = lenslice * np.arange(nbslices + 1)
        kmin = tmp[:-1]
        kmax = tmp[1:]
        kmid = (kmin + kmax) // 2
        for k, (this_min, this_max) in enumerate(zip(kmin, kmax)):
            indices = index[this_min:this_max]
            if weights is not None:
                e[k] = np.average(residual2[0, indices],
                                  weights=weights[0, indices])
            else:
                e[k] = np.mean(residual2[0, indices])

        # this least-square is not weighted, only the construction of e
        e = 0.5 * np.log(e)
        R = np.dot(basis[:, index[kmid]], basis[:, index[kmid]].T)
        r = np.dot(e, basis[:, index[kmid]].T)

        # -------- regularize matrix R
        v = np.resize(linalg.eigvalsh(R), (1, self.n_basis))
        correction = regul * np.max(v)

        # add tiny correction on diagonal without a copy
        R.flat[::len(R) + 1] += correction

        # -------- compute regularized solution
        self.G_ = linalg.solve(R, r)
        self.G_.shape = (1, self.n_basis)

        # -------- prepare the likelihood (and its initial value)
        loglike = np.zeros(iter_gain + 1)
        loglike.fill(np.nan)

        if iter_gain > 0:
            resreg = basis * residual
            if weights is not None:
                weighted_basis = weights * basis
                weighted_resreg = weights * resreg
        # -------- refine this model (iteratively maximise the likelihood)
        for itnum in range(iter_gain):
            sigma2 = self._compute_sigma2(basis)
            loglike[itnum] = wgn_log_likelihood(residual,
                                                sigma2,
                                                weights=weights)

            tmp = residual2 / sigma2 - 1.0
            if weights is not None:
                gradient = np.sum(weighted_basis * tmp, 1)
                hessian = -2.0 * np.dot(weighted_resreg / sigma2, resreg.T)
            else:
                gradient = np.sum(basis * tmp, 1)
                hessian = -2.0 * np.dot(resreg / sigma2, resreg.T)
            dG = linalg.solve(hessian, gradient)
            self.G_ -= dG.T
            if np.amax(np.absolute(dG)) < eps_gain:
                iter_gain = itnum + 1
                break

        sigma2 = self._compute_sigma2(basis)
        self.residual_bis_ = residual / np.sqrt(sigma2)
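A stripped-down version of the same Newton-Raphson scheme on synthetic data (a sketch under the model std(e(t)) = exp(b0 + b1*x(t)), not the library code): the gradient is sum_t basis_t (e_t^2/sigma_t^2 - 1) and the Hessian is -2 sum_t basis_t basis_t^T e_t^2/sigma_t^2, exactly the expressions used above.

import numpy as np
from scipy import linalg

rng = np.random.default_rng(0)
n = 5000
x = rng.standard_normal(n)
b_true = np.array([0.2, 0.5])
basis = np.vstack([np.ones(n), x])        # rows: 1, x(t)
e = np.exp(b_true.dot(basis)) * rng.standard_normal(n)

b = np.zeros(2)
for _ in range(20):
    s2 = np.exp(2 * b.dot(basis))         # current variance estimate
    gradient = basis.dot(e**2 / s2 - 1.0)
    resreg = basis * e
    hessian = -2.0 * (resreg / s2).dot(resreg.T)
    db = linalg.solve(hessian, gradient)
    b -= db                               # Newton ascent (hessian is negative definite)
    if np.max(np.abs(db)) < 1e-8:
        break
print(b, b_true)                          # estimates agree to ~1e-2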
Ejemplo n.º 44
0
def main(want_to_plot):
    #
    # ============== Make Directory for Plots if it's not there already ==========
    #
    # detect the current working directory and print it
    path = os.getcwd()
    print("The current working directory is %s" % path)
    # define the name of the directory to be created
    Plot_Output = path + "/Plot_Output"
    if want_to_plot is True:
        if os.path.exists(Plot_Output):
            print("Directory for wave function plots already exists",
                  Plot_Output)
        else:
            print("Attempting to create directory for wave function plots ",
                  Plot_Output)
            try:
                os.mkdir(Plot_Output)
            except OSError:
                print("Creation of the directory %s failed" % Plot_Output)
            else:
                print("Successfully created the directory %s " % Plot_Output)
    #
    # === Make Directory for for the .dat output  if it's not there already ======
    #
    # detect the current working directory and print it
    path = os.getcwd()
    print("The current working directory is %s" % path)
    # define the name of the directory to be created
    Data_Output = path + "/Data_Output"
    if os.path.exists(Data_Output):
        print("Directory for output .dat files  already exists", Plot_Output)
    else:
        print("Attempting to create directory for .dat output files  ",
              Plot_Output)
        try:
            os.mkdir(Data_Output)
        except OSError:
            print("Creation of the directory %s failed" % Data_Output)
        else:
            print("Successfully created the directory %s " % Data_Output)
    # ===================================================================
    if want_to_plot is True:
        wfcn_plotfile = open("Data_Output/wavefunctions.dat",
                             "w")  # data file for saving wavefunctions
    #
    # =============Constants and conversion factors ==============
    Daltons_to_eMass = 1822.89
    bohr_to_Angstrom = 0.529177
    Hartree_to_eV = 27.211386245988  # NIST ref
    eV_to_wavennumber = 8065.54393734921  # NIST ref on constants + conversions
    Hartree_to_wavenumber = (
        2.1947463136320e5  # value from NIST ref on constants + conversions
    )
    atu_to_fs = 24.18884326509 / 1000
    HartreeToKelvin = 315773
    #
    # =====================================FEM_DVR===================================
    #  Set up the FEM DVR grid given only the Finite Element boundaries and order
    #  of Gauss Lobatto quadrature,  and compute the Kinetic Energy matrix for
    #  this FEM DVR for Mass = mass set in call (atomic units).
    #
    # Here is where the reduced mass (or mass for any 1D problem) is set
    # He_Mass = 4.0026
    # O_Mass =  15.99491461957 # O 16 mass from NIST tables
    H_Mass = 1.007825032  # H atom atomic mass
    mu = (H_Mass / 2.0) * Daltons_to_eMass
    n_order = 30
    FEM_boundaries = [
        0.4,
        1.0,
        1.5,
        2.0,
        2.5,
        3.0,
        3.5,
        4.0,
        4.5,
        5.0,
        5.5,
        6.0,
        6.5,
        7.0,
        7.5,
        8.0,
        8.5,
        9.0,
        9.5,
        20.0,
    ]
    fem_dvr = FEM_DVR(n_order, FEM_boundaries, Mass=mu)
    print("\nFEM-DVR basis of ", fem_dvr.nbas, " functions")
    #
    #   Function to define potential at x and t (if potential is time-dependent)
    #   DVRHelper class library expects function for V(x,t) in general
    #
    # =================================Potential=====================================
    #  Read in files with points for potential curve in hartrees
    #  and load in arrays for interpolation
    #
    path = Path(__file__).parent.absolute()
    perturbation = Potential(path / "potcurve_CISD_H2_ccpvTZ.dat")

    n_vals_pot = perturbation.r_data.shape[0]
    #
    # ===========================================================================================
    #  Plot potential on the DVR grid points on which the wavefunction is defined
    #  and ALSO the interpolation to check we are using the potential that we mean to.
    #
    if want_to_plot is True:
        print("\n Plot potential ")
        x_Plot = []
        pot_Plot = []
        Number_plot_points = 741
        for j in range(0, n_vals_pot):
            x_Plot.append(np.real(perturbation.r_data[j]))
            pot_Plot.append(np.real(perturbation.V_data[j]))
        plt.suptitle("V(r) interpolation", fontsize=14, fontweight="bold")
        string = "V input points"
        plt.plot(x_Plot, pot_Plot, "ro", label=string)
        #
        x_Plot = []
        pot_Plot = []
        dx = (np.real(fem_dvr.x_pts[fem_dvr.nbas - 1]) -
              np.real(fem_dvr.x_pts[0])) / float(Number_plot_points - 1)
        time = 0.0  # dummy time in general call to potential function
        for j in range(0, Number_plot_points):
            x = np.real(fem_dvr.x_pts[0]) + j * dx
            if not (perturbation.r_data[0] <= x <=
                    perturbation.r_data[n_vals_pot - 1]):
                print(
                    "Number of plot points is out of range of perturbation data"
                )
            x_Plot.append(x)
            pot_Plot.append(perturbation.V_Interpolated(x, time))

        plt.plot(x_Plot,
                 pot_Plot,
                 "-b",
                 label="Interpolation on DVR grid range")
        plt.legend(loc="best")
        plt.xlabel(" x ", fontsize=14)
        plt.ylabel("V", fontsize=14)
        print(
            "\n Running from terminal, close figure window to proceed and make .pdf file of figure"
        )
        #   Insert limits if necessary
        # xmax = float(rmax)  # CWM: need to use float() to get plt.xlim to work to set x limits
        # plt.xlim([0,xmax])
        # number_string = str(a)
        plt.savefig("Plot_Output/" + "Plot_potential" + ".pdf",
                    transparent=False)
        plt.show()
    #
    #
    # =============Build Hamiltonian (using general routine with dummy time t=0)=========
    #     Pass name of potential function explicitly here
    time = 0.0
    H_mat = fem_dvr.Hamiltonian(perturbation.vectorized_V_Interpolated, time)
    print("\n Completed construction of Hamiltonian ")
    # ====================================================================================
    #
    # Find all the eigenvalues of the Hamiltonian so we can compare with known bound state energies
    # or make a plot of the spectrum -- For a time-independent Hamiltonian example here
    #
    EigenVals = LA.eigvalsh(H_mat)
    #
    n_energy = 20
    for i in range(0, n_energy):
        print(
            "E( ",
            i,
            ") =   ",
            EigenVals[i],
            " hartrees, excitation energy = ",
            (EigenVals[i] - EigenVals[0]) * Hartree_to_wavenumber,
            " cm^-1",
        )
    # ====================================================================================
    #
    if want_to_plot is True:
        # Extract the n_Plot'th eigenfunction for plotting and use as initial wave function
        # to test propagation
        #
        number_of_eigenvectors = 20
        #
        #  Here n_Plot picks which eigenfunction to plot
        n_Plot = (
            10  # pick a state of this potential to plot < number_of_eigenvectors -1
        )
        #
        print(
            "Calculating ",
            number_of_eigenvectors,
            " eigenvectors for plotting eigenfunctions",
        )
        EigenVals, EigenVecs = LA.eigh(H_mat,
                                       eigvals=(0, number_of_eigenvectors))
        wfcnPlot = []
        for j in range(0, fem_dvr.nbas):
            wfcnPlot.append(np.real(EigenVecs[j, n_Plot]))
        #
        # normalize  wave function from diagonalization
        #
        norm_squared = 0.0
        for j in range(0, fem_dvr.nbas):
            norm_squared = norm_squared + np.abs(wfcnPlot[j])**2
        wfcnPlot = wfcnPlot / np.sqrt(norm_squared)
        norm_squared = 0.0
        for j in range(0, fem_dvr.nbas):
            norm_squared = norm_squared + np.abs(wfcnPlot[j])**2
        print("Norm of wave function being plotted is ", np.sqrt(norm_squared))
        #
        # ================# Plot the  wave function specified by n_Plot above======================
        #   It must be type np.complex for this general wave function plotting logic
        #
        # Cinitial = np.zeros((fem_dvr.nbas), dtype=np.complex)
        # wfcnInitialPlot = np.zeros((fem_dvr.nbas), dtype=np.complex)
        Cinitial = np.zeros(fem_dvr.nbas)
        wfcnInitialPlot = np.zeros(fem_dvr.nbas)
        for j in range(0, fem_dvr.nbas):
            Cinitial[j] = wfcnPlot[j]
        #
        print(
            "\n Plot wave function ",
            n_Plot,
            " (numbered in order of increasing energy)",
        )
        title = "Wavefunction" + str(n_Plot)
        #  note that the dvr.Plot_Psi function makes a .pdf file in the Plot_Output directory
        #  That's what make_plot=True controls.
        x_Plot_array, Psi_plot_array = fem_dvr.Plot_Psi(
            Cinitial,
            plot_title_string=title,
            N_plot_points=Number_plot_points,
            make_plot=True,
        )
        # write the data in file also
        for j in range(0, Number_plot_points):
            print(
                x_Plot_array[j],
                "  ",
                np.real(Psi_plot_array[j]),
                "  ",
                np.imag(Psi_plot_array[j]),
                file=wfcn_plotfile,
            )
        print("&  \n ", file=wfcn_plotfile)
        #
        # plot square of wave function (radial probability distribution)
        #
        Csquared = np.zeros(fem_dvr.nbas, dtype=complex)
        raverage = 0.0
        for i in range(fem_dvr.nbas):
            Csquared[i] = (Cinitial[i]**2) / np.sqrt(fem_dvr.w_pts[i + 1])
            #   compute <r> for this wave function
            #   note that Cinitial[i] contains the necessary weight, sqrt(dvr.w_pts[i+1])
            raverage = raverage + Cinitial[i]**2 * fem_dvr.x_pts[i + 1]
        print("\n Average value of r using DVR for the integral, <r> = ",
              raverage)
        title = "Wavefunction" + str(n_Plot) + "^2"
        #  note that the dvr.Plot_Psi function makes a .pdf file in the Plot_Output directory
        #  That's what make_plot=True controls.
        x_Plot_array, Psi_plot_array = fem_dvr.Plot_Psi(
            Csquared,
            plot_title_string=title,
            N_plot_points=Number_plot_points,
            make_plot=True,
        )
        # write the data in file also
        for j in range(0, Number_plot_points):
            print(
                x_Plot_array[j],
                "  ",
                np.real(Psi_plot_array[j]),
                "  ",
                np.imag(Psi_plot_array[j]),
                file=wfcn_plotfile,
            )
        print("&  \n ", file=wfcn_plotfile)
Ejemplo n.º 45
0
def eigh_gen(A, B):
    """Solve the generalised eigenvalue problem. :math:`\mathbf{A} \mathbf{v} =
    \lambda \mathbf{B} \mathbf{v}`

    This routine will attempt to correct for when `B` is not positive definite
    (usually due to numerical precision), by adding a constant diagonal to make
    all of its eigenvalues positive.

    Parameters
    ----------
    A, B : np.ndarray
        Matrices to operate on.

    Returns
    -------
    evals : np.ndarray
        Eigenvalues of the problem.
    evecs : np.ndarray
        2D array of eigenvectors (packed column by column).
    add_const : scalar
        The constant added on the diagonal to regularise.
    """
    add_const = 0.0

    if (A == 0).all():
        evals = np.zeros(A.shape[0], dtype=A.real.dtype)
        evecs = np.identity(A.shape[0], dtype=A.dtype)

    else:

        try:
            evals, evecs = la.eigh(A, B, overwrite_a=True, overwrite_b=True)
        except la.LinAlgError as e:
            print("Error occurred in eigenvalue solve.")
            # Get error number
            mo = re.search('order (\\d+)', str(e))

            # If exception unrecognised then re-raise.
            if mo is None:
                raise e

            errno = int(mo.group(1))

            if errno < (A.shape[0] + 1):

                print("Matrix probably not positive definite due to "
                      "numerical issues. Trying to add a constant diagonal....")

                evb = la.eigvalsh(B)
                add_const = 1e-15 * evb[-1] - 2.0 * evb[0] + 1e-60

                B[np.diag_indices(B.shape[0])] += add_const
                evals, evecs = la.eigh(A,
                                       B,
                                       overwrite_a=True,
                                       overwrite_b=True)

            else:
                print("Strange convergence issue. "
                      "Trying non divide and conquer routine.")
                evals, evecs = la.eigh(A,
                                       B,
                                       overwrite_a=True,
                                       overwrite_b=True,
                                       turbo=False)

    return evals, evecs, add_const
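A usage sketch for the ordinary (well-conditioned) path, assuming the Python-3 form above with numpy as np and scipy.linalg as la in scope; note the copies, since eigh_gen overwrites its arguments:

import numpy as np

rng = np.random.default_rng(1)
A = rng.standard_normal((4, 4))
A = A + A.T                                # symmetric test matrix
B = np.eye(4)                              # positive definite, no repair needed
evals, evecs, add_const = eigh_gen(A.copy(), B.copy())
print(np.allclose(A.dot(evecs), evecs * evals))  # True: reduces to A v = l v
print(add_const)                                 # 0.0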
Ejemplo n.º 46
0
def crossVal(prob, k=1, kernels=['Linear','Gaussian'], numPC=2, KSError=False, d=0, mu=1, normPCA=True, lams=None, linCon=True, covCon=True, dualize=True, outputFlag=True):
    # Given problem object, conducts k-fold cross-validation with the parameters defined,
    # and returns the mean error (or fairness), the average variance of the dataset
    # explained by the principal components found, the total variance of the datasets,
    # the average top eigenvalues of the optimal solution to the SDP, and the average
    # correlation of the PC's found with the true PC's
    
    idx = np.arange(prob.numPoints); np.random.shuffle(idx)
    
    if KSError: errList = np.empty(k)
    else: errList = np.empty((k,len(kernels)))
    varExpList = np.empty(k)
    totVarList = np.empty(k)
    eigList = [[]]*k
    eigVecList = [[]]*k
    corrList = [[]]*k
    #train_test = np.array([train_test_split((idx),test_size=0.3) for i in range(k)])
    #trainFolds, testFolds = [list(train_test[:,0]),list(train_test[:,1])]
    testFolds = [idx[int(i*prob.numPoints/k):int((i+1)*prob.numPoints/k)] for i in range(k)]
    trainFolds = [np.concatenate([testFolds[j] for j in range(k) if j!=i]) for i in range(k)]
    dat = copy.deepcopy(prob.data)
    srsp = copy.deepcopy(prob.sideResp)
    if outputFlag: print('#################################################################')
    for iteration,(train,test) in enumerate(zip(trainFolds,testFolds)):
        if outputFlag: print('Iteration',iteration)
        if outputFlag: print('Fair PCA Parameters: numPC=%s, d=%s, mu=%s'%(numPC,d,mu))
        if normPCA: prob.data, means, stdevs = normalize(dat[train])
        else: prob.data = dat[train]
        prob.sideResp = srsp[train]
        if linCon and not covCon: B,m = prob.zpca(dimPCA=numPC,d=d)
        elif covCon: B,m = prob.spca(dimPCA=numPC,addLinear=linCon,mu=mu,d=d,dualize=dualize)
        else: B,m = prob.pca(dimPCA=numPC)
        eigVecs = la.eigh(m.m.dat.T.dot(m.m.dat))[1][:,-numPC:]
        varExp = np.trace(B.T.dot(m.m.dat.T).dot(m.m.dat.dot(B)))
        totVar = np.trace(m.m.dat.T.dot(m.m.dat))
        eig = np.round(la.eigvalsh(m.m.X)[-numPC:],4)
        corr = [round(la.norm(b,2),2) for b in B.T.dot(eigVecs)]
        
        varExpList[iteration] = varExp
        totVarList[iteration] = totVar
        eigList[iteration] = eig
        eigVecList[iteration] = eigVecs
        corrList[iteration] = corr
    
        if outputFlag: print(m.m.m.getprosta(mosek.soltype.itr))
        if m.m.m.getprosta(mosek.soltype.itr) not in [mosek.prosta.prim_and_dual_feas,mosek.prosta.unknown]: return None, None, None, None, None
        if outputFlag:
            print('Top eigenvalues of solution:',eig)
            print('Correlation with top eigenvectors:',corr)
            print('Proportion of variance explained:', varExp/totVar)
            print('Proportion of deviation explained:', np.sqrt(varExp/totVar))
            if linCon or covCon:
                if np.max(np.abs((m.m.B.T.dot(m.getZCon(prob.sideResp).reshape((prob.numFields,1))))))>1e-7:
                    print('Linear constraint unsatisfied')
                else:
                    print('Linear constraint satisfied')
        
        if normPCA: prob.data = normalize(((dat[test]-means[:len(test)])/stdevs[:len(test)]+means[:len(test)]).dot(B))[0]
        else: prob.data = normalize(dat[test].dot(B))[0]
        prob.sideResp = srsp[test]
        if KSError: errList[iteration] = np.max(np.abs(multiDimCDF(prob.data,prob.sideResp)))
        else:
            for kernum,kernel in enumerate(kernels):
                if outputFlag: print(kernel,'SVM:')
                if kernel=='Linear': svm, err = prob.svm(useSRSP=True,outputFlag=outputFlag,lams=lams)[1:3]
                elif kernel=='Gaussian': svm, err = prob.svm(useSRSP=True,dual=True,kernel=lambda x,y: math.exp(-la.norm(x-y)**2/2),outputFlag=outputFlag,lams=lams)[1:3]
                elif kernel=='Polynomial': svm, err = prob.svm(useSRSP=True,dual=True,conic=False,kernel=lambda x,y: (x.T.dot(y)+1)**2,outputFlag=outputFlag,lams=lams)[1:3]
                else:
                    if outputFlag: print('\tIncorrect kernel name')
                    continue
                errList[iteration,kernum] = err
    if outputFlag:
        print('-----------------------------------------------------------------')
        print('Average variation explained:',np.round(100*np.mean(varExpList/totVarList),2))
        print('Average deviation explained:',np.round(100*np.mean(np.sqrt(varExpList/totVarList)),2))
        print('Average errors',np.round(np.mean(errList,axis=0),4))
        # winsound.PlaySound("SystemExit", winsound.SND_ALIAS)
    prob.data = dat
    prob.sideResp = srsp
    return errList, varExpList, totVarList, eigList, eigVecList, corrList
Ejemplo n.º 47
0
    def eigenval(self, k):
        return la.eigvalsh(self.hamilton(k))
Ejemplo n.º 48
0
    def dist(self,
             G1,
             G2,
             normed=True,
             kernel='normal',
             hwhm=0.011775,
             measure='jensen-shannon',
             k=None,
             which='LM'):
        """Graph distances using different measure between the Laplacian
        spectra of the two graphs

        The spectra of both Laplacian matrices (normalized or not) are
        computed. Then, the discrete spectra are convolved with a kernel
        to produce continuous ones. Finally, these distributions are
        compared using a metric.

        The results dictionary also stores a 2-tuple of the underlying
        adjacency matrices in the key `'adjacency_matrices'`, the Laplacian
        matrices in `'laplacian_matrices'`, the eigenvalues of the Laplacians
        in `'eigenvalues'`. If the networks being compared are directed, the
        augmented adjacency matrices are calculated and stored in
        `'augmented_adjacency_matrices'`.

        Note : The methods are usually applied to undirected (unweighted)
        networks. We however relax this assumption using the same method
        proposed for the Hamming-Ipsen-Mikhailov. See paper :
        https://ieeexplore.ieee.org/abstract/document/7344816.

        Params
        ------

        G1, G2 (nx.Graph): two networkx graphs to be compared.

        normed (bool): If true, uses the normalized laplacian matrix,
        otherwise the raw laplacian matrix is used.

        kernel (str): kernel to obtain a continuous spectrum. Choices
        available are
            -'normal'
            -'lorentzian'
            -None
        If None is chosen, the discrete spectrum is used instead, and the
        measure is simply the euclidean distance between the vector of
        eigenvalues for each graph.

        hwhm (float): half-width at half-maximum for the kernel. The default
        value is chosen such that the standard deviation for the normal
        distribution is 0.01, as in the paper
        https://www.sciencedirect.com/science/article/pii/S0303264711001869.
        This option is relevant only if kernel is not None.

        measure (str): metric between the two continuous spectra. Choices
        available are
            -'jensen-shannon'
            -'euclidean'
        This option is relevant only if kernel is not None.

        k (int): number of eigenvalues kept for the (discrete) spectrum, also
        used to create the continuous spectrum. If None, all the eigenvalues
        are used. k must be smaller (strictly) than the size of both graphs.

        which (str): if k is not None, this option specifies the eigenvalues
        that are kept. See the choices offered by `scipy.sparse.linalg.eigsh`.
        The largest eigenvalues in magnitude are kept by default.

        Returns
        -------

        dist (float): the distance between G1 and G2.

        """

        #get the adjacency matrices
        adj1 = nx.to_numpy_array(G1)
        adj2 = nx.to_numpy_array(G2)
        self.results['adjacency_matrices'] = adj1, adj2

        #verify if the graphs are directed (at least one)
        directed = nx.is_directed(G1) or nx.is_directed(G2)

        if directed:
            #create augmented adjacency matrices
            N1 = len(G1)
            N2 = len(G2)
            null_mat1 = np.zeros((N1, N1))
            null_mat2 = np.zeros((N2, N2))
            adj1 = np.block([[null_mat1, adj1.T], [adj1, null_mat1]])
            adj2 = np.block([[null_mat2, adj2.T], [adj2, null_mat2]])
            self.results['augmented_adjacency_matrices'] = adj1, adj2

        #get the laplacian matrices
        lap1 = laplacian(adj1, normed=normed)
        lap2 = laplacian(adj2, normed=normed)
        self.results['laplacian_matrices'] = lap1, lap2

        #get the eigenvalues of the laplacian matrices
        if k is None:
            ev1 = np.abs(eigvalsh(lap1))
            ev2 = np.abs(eigvalsh(lap2))
        else:
            #transform the dense laplacian matrices to sparse representations
            lap1 = csgraph_from_dense(lap1)
            lap2 = csgraph_from_dense(lap2)
            ev1 = np.abs(eigsh(lap1, k=k, which=which)[0])
            ev2 = np.abs(eigsh(lap2, k=k, which=which)[0])
        self.results['eigenvalues'] = ev1, ev2

        if kernel is not None:
            #define the proper support
            a = 0
            if normed:
                b = 2
            else:
                b = np.inf

            #create continuous spectra
            density1 = _create_continuous_spectrum(ev1, kernel, hwhm, a, b)
            density2 = _create_continuous_spectrum(ev2, kernel, hwhm, a, b)

            #compare the spectra
            dist = _spectra_comparison(density1, density2, a, b, measure)
            self.results['dist'] = dist
        else:
            #euclidean distance between the two discrete spectra
            dist = np.linalg.norm(ev1 - ev2)
            self.results['dist'] = dist

        return dist
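The helper _create_continuous_spectrum is not shown in this excerpt; a minimal sketch of the idea (one Gaussian kernel per eigenvalue, evaluated on a grid over the support [a, b], normalized to unit mass) might look like:

import numpy as np

def smooth_spectrum(evals, hwhm, a, b, n_grid=512):
    """Continuous spectral density: a Gaussian bump per eigenvalue."""
    std = hwhm / np.sqrt(2 * np.log(2))    # HWHM -> standard deviation (~0.01 here)
    grid = np.linspace(a, b, n_grid)
    dens = np.exp(-0.5 * ((grid[:, None] - evals[None, :]) / std) ** 2).sum(axis=1)
    return grid, dens / (dens.sum() * (grid[1] - grid[0]))  # crude quadrature norm

grid, dens = smooth_spectrum(np.array([0.0, 0.5, 2.0]), hwhm=0.011775, a=0, b=2)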
Ejemplo n.º 49
0
def band_eigenvalues_and_eigenvectors(params,kx,ky,kz,VBM,proj_vec,type_):
	# Make the parameters of the TB model
	es = params[0]
	ep = params[1]
	ed = params[2]
	ed_ = params[3]
	vss = params[4]
	vxx = params[5]
	vxy = params[6]
	vsp = params[7]
	vsdS = params[8]
	vpdS = params[9]
	vpdP = params[10]
	vddS = params[11]
	vddP = params[12]
	vddD = params[13]
	v15 = vsdS/(3.0**0.5)
	v18 = 0
	v19 = 0
	v25 = ((3**0.5)*vpdS + vpdP)/(3.0*(3.0**0.5))
	v26 = ((3**0.5)*vpdS - (2.0*vpdP))/(3.0*(3.0**0.5))
	v27 = v25
	v28 = vpdP/(3.0**0.5)
	v29 = -1.0*vpdP/3.0
	v38 = -1.0*v28
	v39 = v29
	v48 = 0.0
	v49 = -2.0*v29
	v55 = ((3.0*vddS)+(2.0*vddP)+(4.0*vddD))/9.0
	v56 = ((3.0*vddS)-vddP-(2.0*vddD))/9.0
	v57 = v56
	v58 = 0.0
	v78 = (vddP-vddD)/3.0
	v59 = -2.0*v78/(3.0**0.5)
	v68 = -1.0*v78
	v79 = (vddP-vddD)/(3.0*(3.0**0.5))
	v69 = v79
	v88 = 2.0*(vddP/3.0)+(vddD/3.0)
	v89 = 0.0
	v99 = v88

	# Get the phase factors for the TB model
	g = 0.25*np.array([cmath.rect(1,(np.pi/2)*(kx+ky+kz)) +cmath.rect(1,(np.pi/2)*(kx-ky-kz)) +cmath.rect(1,(np.pi/2)*(-kx+ky-kz)) +cmath.rect(1,(np.pi/2)*(-kx-ky+kz)),
					   cmath.rect(1,(np.pi/2)*(kx+ky+kz)) +cmath.rect(1,(np.pi/2)*(kx-ky-kz)) -cmath.rect(1,(np.pi/2)*(-kx+ky-kz)) -cmath.rect(1,(np.pi/2)*(-kx-ky+kz)),
					   cmath.rect(1,(np.pi/2)*(kx+ky+kz)) -cmath.rect(1,(np.pi/2)*(kx-ky-kz)) +cmath.rect(1,(np.pi/2)*(-kx+ky-kz)) -cmath.rect(1,(np.pi/2)*(-kx-ky+kz)),
					   cmath.rect(1,(np.pi/2)*(kx+ky+kz)) -cmath.rect(1,(np.pi/2)*(kx-ky-kz)) -cmath.rect(1,(np.pi/2)*(-kx+ky-kz)) +cmath.rect(1,(np.pi/2)*(-kx-ky+kz))],dtype=complex)

	# Make the on-site potential matrix
	self_ham = np.array([[es, 	0.0, 	0.0,  	0.0,	0.0,	0.0,	0.0,	0.0,	0.0],
						 [0.0,	ep, 	0.0,	0.0,	0.0,	0.0,	0.0,	0.0,	0.0],
						 [0.0,	0.0,	ep,	    0.0,	0.0,	0.0,	0.0,	0.0,	0.0],
						 [0.0,	0.0,	0.0,	ep,	    0.0,	0.0,	0.0,	0.0,	0.0],
						 [0.0,	0.0,	0.0,	0.0,	ed,	    0.0,	0.0,	0.0,	0.0],
						 [0.0,	0.0,	0.0,	0.0,	0.0,	ed,   	0.0,	0.0,	0.0],
						 [0.0,	0.0,	0.0,	0.0,	0.0,	0.0,	ed, 	0.0,	0.0],
						 [0.0,	0.0,	0.0,	0.0,	0.0,	0.0,	0.0,	ed_,	0.0],
						 [0.0,	0.0,	0.0,	0.0,	0.0,	0.0,	0.0,	0.0,	ed_]],dtype=complex)

	# Make the interaction potential matrix
	interact_ham = np.array([[vss*g[0], vsp*g[1], vsp*g[2], vsp*g[3], v15*g[3], v15*g[1], v15*g[2], v18*g[0], v19*g[0]],
							[-vsp*g[1], vxx*g[0], vxy*g[3], vxy*g[2], v25*g[2], v26*g[0], v27*g[3], v28*g[1], v29*g[1]],
							[-vsp*g[2], vxy*g[3], vxx*g[0], vxy*g[1], v27*g[1], v25*g[3], v26*g[0],-v28*g[2], v29*g[2]],
							[-vsp*g[3], vxy*g[2], vxy*g[1], vxx*g[0], v26*g[0], v27*g[2], v25*g[1], v48*g[3], v49*g[3]],
							[ v15*g[3],-v25*g[2],-v27*g[1],-v26*g[0], v55*g[0], v56*g[2], v56*g[1], v58*g[3], v59*g[3]],
							[ v15*g[1],-v26*g[0],-v25*g[3],-v27*g[2], v56*g[2], v55*g[0], v56*g[3], v68*g[1], v69*g[1]],
							[ v15*g[2],-v27*g[3],-v26*g[0],-v25*g[1], v56*g[1], v56*g[3], v55*g[0], v78*g[2], v79*g[2]],
							[ v18*g[0],-v28*g[1], v28*g[2],-v48*g[3], v58*g[3], v68*g[1], v78*g[2], v88*g[0], v89*g[0]],
							[ v19*g[0],-v29*g[1],-v29*g[2],-v49*g[3], v59*g[3], v69*g[1], v79*g[2], v89*g[0], v99*g[0]]],dtype=complex)
	
	# Get the complex conjugate of the interaction matrix
	conj_interact_ham = np.conj(interact_ham.T)

	# Make the Hamiltonian out of the on-site potential and the interaction
	num_atoms = 2
	hamiltonian = np.zeros((num_atoms,num_atoms,9,9),dtype=complex)
	for i in range(num_atoms):
		for j in range(num_atoms):
			if i == j:
				hamiltonian[i][j] = self_ham
			if i == j + 1:
				hamiltonian[i][j] = interact_ham
			if j == i + 1:
				hamiltonian[i][j] = conj_interact_ham

	hamiltonian = np.concatenate(hamiltonian,axis=2)
	hamiltonian = np.concatenate(hamiltonian,axis=0)

	# If calculating the residual, use the projected wave function
	if type_ == 0:
		vss = proj_vec
		es = LA.eigvalsh(hamiltonian)
	# If calculating the projection, get the regular tight binding eigenvectors
	else:
		es, vss = LA.eigh(hamiltonian)

	# Shift the eigenvalues by the VBM
	vbm = len(es)*[VBM]	
	return es-vbm, vss
#                                   main
#
#################################################

# interaction coefficient
g = 0.5
# energy level spacing
d = 1

# initial Hamiltonian for the given values of g and d
H0 = hamiltonian(d, g)
# the dimension of one side of the Hamiltonian (its square)
dim = H0.shape[0]

# calculate exact eigenvalues
eigenvalues = eigvalsh(H0)

# turn initial Hamiltonian into a linear array
y0 = reshape(H0, -1)

# flow parameter step
ds = 1e-4

# flow parameters for snapshot images
flowparams = arange(0, 10, ds)

# threshold to end summation
threshold = 1e-3


def generate_data(flow_parameters, threshold, d, g):
Ejemplo n.º 51
0
def numpy_eigen():
    A = np.array([[1, 3, 5], [3, 5, 3], [5, 3, 9]])
    evals, evecs = la.eig(A)   # general solver: returns complex eigenpairs
    return la.eigvalsh(A)      # symmetric solver: sorted real eigenvalues
# la.eigvalsh(matrix)   # numerical real eigenvalues of a matrix
# la.eigh(matrix)       # numerical real eigenvalues/eigenvectors of a matrix

eps, delta = sympy.symbols("epsilon, Delta")
H = sympy.Matrix([[eps, delta], [delta, -eps]])
print(H)
## Eigenvalues obtained analytically
print(">>> Analytical eigenvalues <<<")
print(H.eigenvals())
print(H.eigenvects())

(eval1, _, evec1), (eval2, _, evec2) = H.eigenvects()
inner_raw = evec1[0].T * evec2[0]
inner = sympy.simplify(inner_raw)
print(inner_raw)
print(inner)

## Eigenvalues obtained numerically
print("\n>>> Numerical eigenvalues <<<")
A = np.array([[1, 3, 5], [3, 5, 3], [5, 3, 9]])
evals, evecs = la.eig(A)
print("numerical eigenvalues =", evals)
print("numerical eigenvectors =\n", evecs)
# complex numerical approach
print("numerical eigenvalues =", la.eigvals(A))
print("numerical eigenvalues/eigenvectors =\n", la.eig(A))
# numerical approach restricted to real values
# real symmetric/Hermitian matrices have only real eigenvalues, so these are preferable for them
print("numerical real eigenvalues =", la.eigvalsh(A))
print("numerical real eigenvalues/eigenvectors =\n", la.eigh(A))
Ejemplo n.º 53
0
def floquet_energies(idx, **kwargs):

    return eigvalsh(Rashba_floquet(**kwargs))[idx]
Ejemplo n.º 54
0
    def __init__(self, **kwargs):
        """
        The following parameters must be specified
            H -- the Hamiltonian of the system (N \times N)
            M -- the rank of R matrix (M < N)
            beta (optional) -- the initial value of the inverse temperature
            dbeta -- the inverse temperature increment
            mu (optional) -- the chemical potential, default is zero
        """

        # save all attributes
        for name, value in kwargs.items():
            # if the value supplied is a function, then dynamically assign it as a method;
            # otherwise bind it a property
            if isinstance(value, FunctionType):
                setattr(self, name, MethodType(value, self))  # Python 3: MethodType(func, instance)
            else:
                setattr(self, name, value)

        # Check that all attributes were specified
        try:
            self.H
        except AttributeError:
            raise AttributeError("The Hamiltonian (H) was not specified")

        try:
            self.M
        except AttributeError:
            raise AttributeError(
                "The rank (M) of the low-rank approximation of the density matrix R was not specified"
            )

        try:
            self.dbeta
        except AttributeError:
            raise AttributeError(
                "The inverse temperature increment (dbeta) was not specified")

        try:
            self.mu
        except AttributeError:
            print(
                "Warning: Chemical potential was not specified, thus it is set to zero."
            )
            self.mu = 0

        try:
            self.beta
        except AttributeError:
            self.beta = 0.

        # First step: symmetrized Hamiltonian
        self.H += self.H.conj().T
        self.H *= 0.5

        # Set the initial condition for the matrix R
        # recall that the density matrix rho = R R^{\dagger}
        self.R = np.eye(self.H.shape[0], self.M, dtype=self.H.dtype)
        self.R *= np.sqrt(0.5)

        # save the identity matrix
        self.identity = sparse.identity(self.M, dtype=self.R.dtype)

        # find eigenvalues of the Hamiltonian for comparision with the exact expression self.get_exact_pop
        self.E = linalg.eigvalsh(
            # Convert a sparse Hamiltonian to a dense matrix
            self.H.toarray() if sparse.issparse(self.H) else self.H)
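The initial condition sets R = sqrt(0.5) * I with shape (N, M), so rho = R R^dagger is half of a rank-M projector; a quick standalone check of that claim (a sketch, independent of the class):

import numpy as np

N, M = 6, 3
R = np.sqrt(0.5) * np.eye(N, M)
rho = R.dot(R.conj().T)
print(np.trace(rho))               # 1.5 == 0.5 * M
print(np.linalg.eigvalsh(rho))     # M eigenvalues of 0.5, the rest zero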
Ejemplo n.º 55
0
group_index = rank // 4  # integer division: group index must be an int
group_rank = rank % 4
group_indices = np.arange(4) + group_index * 4
new_group = world_group.Incl(group_indices)
new_comm = world_comm.Create(new_group)

# Set the default blocksize
pscore._blocksize = [b, b]

# Process 0 creates the test matrices for each group which are identical up to
# rescaling. The rescaling is such that the greatest eigenvalue is equal to the
# group index.
if rank == 0:
    mat = np.random.standard_normal((n, n))
    mat = mat + mat.T
    mat = mat / la.eigvalsh(mat).max()

    for i in range(world_comm.size):
        np.save("testarr_%i.npy" % i, mat * (i + 1))

world_comm.Barrier()

# Create the Process Context
context = pscore.ProcessContext(comm=new_comm, gridsize=[2, 2])

# Create a distributed matrix and find its eigenvalues
dm = pscore.DistributedMatrix.from_npy("testarr_%i.npy" % group_index,
                                       context=context)
evals, evecs = psr.pdsyevd(dm)

# Print out results for testing.
def RelativeEntropy(rho, sigma):
    # Calculate S(rho || sigma); pairing the independently sorted spectra is
    # only exact when rho and sigma commute (see the general variant below)
    rho_e = eigvalsh(rho)
    sigma_e = eigvalsh(sigma)
    return -rho_e.dot(np.log2(sigma_e)) + rho_e.dot(np.log2(rho_e))
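Pairing the two sorted spectra, as RelativeEntropy does, matches the textbook S(rho||sigma) = Tr[rho (log2 rho - log2 sigma)] only when rho and sigma commute. A hedged basis-independent variant using matrix logarithms (assumes both states are full rank, so the logs exist):

import numpy as np
from scipy.linalg import logm

def relative_entropy_general(rho, sigma):
    """S(rho || sigma) in bits, valid even when rho and sigma do not commute."""
    log2m = lambda M: logm(M) / np.log(2)
    return float(np.real(np.trace(rho.dot(log2m(rho) - log2m(sigma)))))

rho = np.diag([0.75, 0.25])
sigma = np.eye(2) / 2
print(relative_entropy_general(rho, sigma))   # ~0.1887 bits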
def eigen_values_search(formulation, prob, nodes, K0, K, colors, c1, c2,
                        overwrite_files, draw_plots, generate_plots):

    four_node_edges = [[], [(0, 1)], [(0, 1), (0, 3)], [(0, 1), (2, 3)],
                       [(0, 1), (1, 2), (2, 3)], [(0, 1), (0, 2), (0, 3)],
                       [(0, 1), (1, 2), (2, 0)],
                       [(0, 1), (1, 2), (2, 3), (3, 0)],
                       [(0, 1), (1, 2), (2, 0), (3, 0)],
                       [(0, 1), (1, 2), (2, 3), (3, 0), (0, 2)],
                       [(0, 1), (1, 2), (2, 3), (3, 0), (0, 2), (1, 3)]]

    dir_path = os.path.dirname(os.path.abspath(__file__))
    mingap_path = os.path.join(dir_path, "results/mingap/")
    if formulation == 'nonlinear':
        results_path = os.path.join(mingap_path, "eigenvalues")
    elif formulation == 'linear':
        results_path = os.path.join(mingap_path, "eigenvalues_l")

    solfile = os.path.join(mingap_path, "erdos_" + str(
        formulation) + "_" + str(int(100*prob)) + "_k_" + str(colors) + \
            "_c1_" + str(int(c1)) + "_c2_" + str(int(c2)))

    # Pauli matrices

    Id = [[1, 0], [0, 1]]
    s_x = [[0, 1], [1, 0]]
    s_y = [[0, -1j], [1j, 0]]
    s_z = [[1, 0], [0, -1]]

    # A and B functions such that H = A(s)Hd + B(s)Hp. These are specific to D-Wave 2000Q at NASA.
    # These are old fits by Zoe we will instead use the data provided by DWave directly

    def a_function(s):
        return (2 / math.pi) * (math.exp(3.173265 * s) *
                                (207.253 * (1 - s)**9 + 203.843 *
                                 (1 - s)**7 - 380.659 * (1 - s)**8))

    def b_function(s):
        return (2 / math.pi) * (0.341734 + 6.713285 * s + 32.9702 * s * s)

    # s_list = np.arange(0, 1.0001, 0.001)

    # List of s values you want to use, between 0 and 1

    df = pd.read_excel(os.path.join(
        dir_path, "09-1192A-C_DW_2000Q_2_1_processor-annealing-schedule.xlsx"),
                       sheet_name='DW_2000Q_2_processor-annealing-')

    s_list = df['s']

    # Nested Kronecker that we need for the individual Pauli matrices
    # Performs a Kronecker product on a series of matrices all stored in the variable a

    def nested_kronecker(a):
        if len(a) == 2:
            return np.kron(a[0], a[1])
        else:
            return np.kron(a[0], nested_kronecker(a[1:]))

    # Builds a Pauli matrix acting on qubit j (variable mat is the Pauli matrix we want applied on j, s_x, s_y or s_z)

    def individual_pauli(mat, j, num_of_spins):
        ops = []
        for _ in range(j):
            ops.append(Id)
        ops.append(mat)
        for _ in range(num_of_spins - j - 1):
            ops.append(Id)
        return nested_kronecker(ops)

    columns = ['id', 'eigenval', 'mingap']
    solutions = pd.DataFrame(columns=columns)

    for k in range(K0, K):
        G = nx.Graph()
        # G = nx.cycle_graph(k)
        # G.add_nodes_from([0, 1, 2, 3])
        # G.add_edges_from(four_node_edges[k])
        # G, alpha = devil_graphs(k)
        G = nx.erdos_renyi_graph(n=nodes, p=prob, seed=k)
        temp = dict()

        tolerance = 1  # GHz
        idx_start = 0
        idx_end = len(s_list)
        idx_interval = 100
        decrease = 10

        eigenvals = []
        s_plot = []
        mingap_ss = []
        index_ss = []

        eigenfilename = "erdos_" + \
            str(prob) + "_" + str(k) + "_k_" + str(colors) + \
            "_c1_" + str(int(c1)) + "_c2_" + str(int(c2))
        idxfilename = "erdos_idx_" + \
            str(prob) + "_" + str(k) + "_k_" + str(colors) + \
            "_c1_" + str(int(c1)) + "_c2_" + str(int(c2))

        print(eigenfilename)

        eigenfile = os.path.join(results_path, eigenfilename + ".npy")
        idxfile = os.path.join(results_path, idxfilename + ".npy")

        ti = time()  # start timer

        # If file exists, the load them
        if os.path.exists(eigenfile) and os.path.exists(
                idxfile) and not overwrite_files:
            eigenvals = np.load(eigenfile, allow_pickle=True)
            s_plot = np.load(idxfile, allow_pickle=True)

            for i in range(1, eigenvals.shape[1]):
                if min(abs(eigenvals[:, i] - eigenvals[:, 0])) < tolerance:
                    pass
                else:
                    break
            gap = eigenvals[:, i] - eigenvals[:, 0]
            mingap = min(gap)
            mingap_idx = np.argmin(gap)
            mingap_s = s_plot[mingap_idx]
        else:
            # Calculate full Hamiltonian for the s values in your list and get the eigenvalues and gap
            print('Running eigenvalues')

            # Find the maximum independent set, which is known in this case to be of length 3
            if formulation == 'nonlinear':
                Q, offset = nonlinear(G, k=colors, c1=c1, c2=c2)
            elif formulation == 'linear':
                Q, offset = linear(G, k=colors, c1=c1, c2=c2)
            bqm = dimod.BinaryQuadraticModel.from_qubo(Q, offset=offset)
            h, J, offset = bqm.to_ising()
            indices = dict(
                (key, idx) for (idx, key) in enumerate(bqm.linear.keys()))

            # Number of qubits
            n = len(list(bqm.linear))

            # Build driver and problem Hamiltonians
            Hd = 0
            for i in range(n):
                Hd = np.add(Hd, individual_pauli(s_x, i, n))

            Hp = 0
            for pair in bqm.quadratic:
                Hp = np.add(
                    Hp, J[pair] * individual_pauli(s_z, indices[pair[0]], n) *
                    individual_pauli(s_z, indices[pair[1]], n))

            for qubit in bqm.linear:
                Hp = np.add(
                    Hp, h[qubit] * individual_pauli(s_z, indices[qubit], n))
            # If your Ising Hamiltonian also has an external field you can add it with terms of the form individual_pauli(s_z, i, n)
            while idx_interval >= 1:
                for idx in range(idx_start, idx_end, idx_interval):
                    if df.loc[idx, 's'] in mingap_ss or idx in index_ss:
                        pass
                    else:
                        H = df.loc[idx, 'A(s) (GHz)'] * Hd + \
                            df.loc[idx, 'B(s) (GHz)'] * Hp
                        if n >= 16:
                            sH = sparse.csc_matrix(H)
                            eig = eigs(sH,
                                       10,
                                       which='SR',
                                       tol=1e-3,
                                       return_eigenvectors=False)
                        else:
                            eig = la.eigvalsh(H)
                        eig = np.sort(eig.real)
                        eigenvals.append(eig)
                        # np.append(eig, eigenvals, axis=0)
                        s_plot.append(df.loc[idx, 's'])

                s_plot, eigenvals = (list(t) for t in zip(
                    *sorted(zip(s_plot, eigenvals))))

                eigenvalues = np.array(eigenvals)

                for i in range(1, eigenvalues.shape[1]):
                    if min(abs(eigenvalues[:, i] -
                               eigenvalues[:, 0])) < tolerance:
                        pass
                    else:
                        break
                print('Minimum gap computed with ' + str(i) + 'th eigenvalue')
                gap = eigenvalues[:, i] - eigenvalues[:, 0]

                mingap = min(gap)
                mingap_idx = np.argmin(gap)
                mingap_s = s_plot[mingap_idx]
                mingap_ss.append(mingap_s)
                mingap_index = df.index[df['s'] == mingap_s].tolist()

                print('Minimum gap: ' + str(mingap) + ' GHz')
                print('At s= ' + str(mingap_s))

                idx_end = mingap_index[0] + idx_interval
                idx_start = mingap_index[0] - idx_interval
                index_ss.append(idx_start)
                index_ss.append(idx_end)
                index_ss.append(mingap_index[0])
                idx_interval = int(idx_interval / decrease)

            tf = time()  # start timer
            print("simulation took {0:.4f} sec".format(tf - ti))
            temp['time'] = tf - ti
            eigenvals = np.array(eigenvals)
            np.save(eigenfile, eigenvals)
            np.save(idxfile, s_plot)

        temp['id'] = eigenfilename
        temp['k'] = colors
        temp['eigenval'] = i
        temp['mingap'] = mingap
        temp['c1'] = c1
        temp['c2'] = c2

        if generate_plots:

            # plt.figure(0)
            # df.plot(x='s', y=['A(s) (GHz)', 'B(s) (GHz)'])
            # plt.xlabel('Adimensional time $s$')
            # plt.ylabel('Energy (GHz)')
            # plt.legend()
            # plt.xlim(0, 1)
            #
            plt.figure(1)
            plt.plot(s_plot, eigenvals)
            plt.xlabel('Adimensional time $s$')
            plt.ylabel('Hamiltonian eigenvalues')
            plt.xlim(0, 1)
            plt.savefig(
                os.path.join(
                    results_path, "erdos_" + str(prob) + "_" + str(k) + "_M" +
                    str(c1) + "all_eigs.png"))
            #
            plt.figure(2)
            plt.plot(s_plot, eigenvals, '0.75')
            plt.plot(s_plot, eigenvals[:, i])
            plt.plot(s_plot, eigenvals[:, 0])
            plt.axvline(mingap_s, color='k', linestyle="dashed")
            plt.xlabel('Adimensional time $s$')
            plt.ylabel('Hamiltonian eigenvalues')
            plt.xlim(0, 1)
            plt.savefig(
                os.path.join(
                    results_path, "erdos_" + str(prob) + "_" + str(k) + "_c1" +
                    str(c1) + "_c2" + str(c2) + "eigs.png"))

            plt.figure(3)
            plt.plot(s_plot, gap, '*')
            plt.vlines(mingap_s, 0, mingap, linestyle="dashed")
            plt.hlines(mingap, 0, mingap_s, linestyle="dashed")
            plt.xlabel('Adimensional time $s$')
            plt.ylabel(r'Eigenvalues gap $\Delta$')
            plt.xlim(0, 1)
            plt.ylim(0, None)
            plt.savefig(
                os.path.join(
                    results_path, "erdos_" + str(prob) + "_" + str(k) + "_c1" +
                    str(c1) + "_c2" + str(c2) + "gap.png"))

            if draw_plots:
                plt.show()

            plt.figure(1).clf()
            plt.figure(2).clf()
            plt.figure(3).clf()

        solutions = pd.concat([solutions, pd.DataFrame([temp])], ignore_index=True)
        solutions.to_csv(solfile + ".csv")
    sol_total = pd.DataFrame.from_dict(solutions)
    sol_total.to_csv(solfile + ".csv")
    sol_total.to_excel(solfile + ".xlsx")
Ejemplo n.º 58
0
    def maximizationParam(self, X, post, regularizer=0):
        [N, dim] = X.shape
        K = len(self.listSigmaInds)
        S = len(self.listSigmaType)
        dtype = X.dtype

        self.prioriProb = np.sum(post[:, :K], axis=0,
                                 keepdims=True).transpose([1, 0])

        self.mu = np.tensordot(post, X, (0, 0)) / self.prioriProb
        for s in range(S):
            sigmaType = self.listSigmaType[s]
            if sigmaType == 2:  # full covariance
                sigma = np.zeros([dim, dim], dtype=dtype)
                sigmadem = np.zeros([], dtype=dtype)
                for k in range(K):
                    if s == self.listSigmaInds[k]:
                        Xmu = X - self.mu[(k, ), :]
                        Xmu = np.sqrt(post[:, (k, )]) * Xmu
                        sigma = sigma + np.tensordot(Xmu, Xmu, (0, 0))
                        sigmadem += self.prioriProb[k, 0]
                sigma = sigma / sigmadem
                if regularizer > 0:
                    sigma = sigma + regularizer * np.eye(dim)
                elif regularizer < 0:
                    # Nudge sigma by |regularizer| times the machine spacing at
                    # its largest eigenvalue to keep it positive-definite.
                    sigma = sigma + np.abs(regularizer * np.spacing(
                        eigvalsh(sigma,
                                 subset_by_index=(dim - 1, dim - 1)))) * np.eye(dim)
            elif sigmaType == 1:  # diagonal covariance
                sigma = np.zeros([1, dim], dtype=dtype)
                sigmadem = np.zeros([], dtype=dtype)
                for k in range(K):
                    if s == self.listSigmaInds[k]:
                        Xmu = X - self.mu[(k, ), :]
                        sigma = sigma + np.tensordot(post[:, (k, )],
                                                     (Xmu * Xmu), (0, 0))
                        sigmadem += self.prioriProb[k, 0]
                sigma = sigma / sigmadem
                if regularizer > 0:
                    sigma = sigma + regularizer
                elif regularizer < 0:
                    sigma = sigma + np.abs(
                        regularizer * np.spacing(np.max(sigma)))
            else:  # isotropic covariance
                sigma = np.zeros([], dtype=dtype)
                sigmadem = np.zeros([], dtype=dtype)
                for k in range(K):
                    if s == self.listSigmaInds[k]:
                        Xmu = X - self.mu[(k, ), :]
                        sigma = sigma + np.dot(post[:, k],
                                               np.mean((Xmu * Xmu), axis=1))
                        sigmadem += self.prioriProb[k, 0]
                sigma = sigma / sigmadem
                if regularizer > 0:
                    sigma = sigma + regularizer
                elif regularizer < 0:
                    sigma = sigma + np.abs(regularizer * np.spacing(sigma))
            self.listSigma[s] = sigma

        # normalize the prior probabilities (and the outlier probability, if used)
        if self.outliersProb < 0:
            self.prioriProb = self.prioriProb / np.sum(self.prioriProb)
        else:
            self.outliersProb = np.sum(post[:, K])
            dem = self.outliersProb + np.sum(self.prioriProb)
            self.prioriProb = self.prioriProb / dem
            self.outliersProb = self.outliersProb / dem
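# A standalone sketch of the negative-regularizer trick used above: nudge a
# covariance matrix by |regularizer| times the machine spacing at its largest
# eigenvalue, just enough to keep it positive-definite. The function name
# `jitter_covariance` is made up for illustration.
import numpy as np
from scipy.linalg import eigvalsh

def jitter_covariance(sigma, regularizer=-10.0):
    """Return sigma plus a minimal positive-definiteness jitter."""
    dim = sigma.shape[0]
    # Request only the largest eigenvalue; eigvalsh sorts in ascending order.
    top = eigvalsh(sigma, subset_by_index=(dim - 1, dim - 1))[0]
    return sigma + abs(regularizer * np.spacing(top)) * np.eye(dim)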
Ejemplo n.º 59
0
def _find_unitary_blocks(HLs,
                         HRs,
                         projectors,
                         squares_to_1=True,
                         conjugate=False,
                         ev_test=True,
                         sparse=False):
    """Find candidate symmetry linear spaces in all blocks.
    HLs and HRs are lists of reduced Hamiltonians (families) that go to left and right side
    of the equations.

    Returns a dictionary {(i, j): Uij} of all symmetry candidate blocks that have a
    nonzero solution of Uij @ HLs[j] = HRs[i] @ Uij for Uij.

    If squares_to_1=True, it is assumed that the operators square is proportional to 1
    in every block. The search is limited to j <= i, the diagonal blocks have a phase choice
    and the off-diagonal blocks with j > i are constructed to ensure squaring to +-1.
    Otherwise the blocks Uij and Uji are calculated independently.

    If ev_test=True the eigenvalues of the matrices are tested first
    """
    # Only need to find symmetries in half of each block of the Hamiltonian.
    # We take blocks in the lower triangular half and on the diagonal.
    block_dict = {}
    ind = range(len(projectors))
    # Pretest eigenvalues
    if ev_test:
        evsL = [[la.eigvalsh(h) for h in HLs[i]] for i in ind]
        evsR = [[la.eigvalsh(h) for h in HRs[i]] for i in ind]
    for (i, j) in it.product(ind, ind):
        # Only do j <= i if squares to 1
        if squares_to_1 and j > i:
            continue
        # Only allowed between blocks of identical shape
        if projectors[i].shape != projectors[j].shape:
            continue
        # Pretest eigenvalues
        if ev_test:
            if not allclose(evsL[j], evsR[i]):
                continue
        # Find block ij of the symmetry operator
        block_dsymm = solve_mat_eqn(HLs[j],
                                    HRs[i],
                                    hermitian=False,
                                    traceless=False,
                                    sparse=sparse,
                                    k_max=2)
        # Normalize block_dsymm such that it is close to unitary. The matrix
        # returned by solve_mat_eqn is normalized such that Tr(X.T.conj() @ X) is close to 1.
        block_dsymm = np.sqrt(block_dsymm.shape[-1]) * block_dsymm
        # If the space is not empty, we store it and the indices of the block.
        if len(block_dsymm):
            # There should be only one solution, which is invertible
            if len(block_dsymm) > 1 or np.isclose(la.det(block_dsymm[0]), 0):
                raise ValueError('Hamiltonian blocks have residual symmetry.')
            block_dsymm = block_dsymm[0]
            assert allclose(block_dsymm @ HLs[j], HRs[i] @ block_dsymm)
            # The block must be proportional to a unitary
            prop_to_I, coeff = prop_to_id(block_dsymm.dot(
                block_dsymm.T.conj()))
            assert prop_to_I and np.isclose(np.imag(coeff),
                                            0) and np.real(coeff) > 0
            # Normalize such that it is unitary
            block_dsymm = block_dsymm / np.sqrt(coeff)
            block_dict[(i, j)] = block_dsymm
            # If squares to 1, fill out the lower triangle
            if squares_to_1:
                block_dict[(i, j)], block_dict[(j, i)] = _nice_square(
                    block_dsymm, (i == j), conjugate)
    return block_dict
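# A standalone sketch of the final normalization step above: rescale a
# candidate block so it becomes exactly unitary, which is only valid when
# U @ U.conj().T is proportional to the identity. `normalize_to_unitary`
# is a made-up name for illustration.
import numpy as np

def normalize_to_unitary(U, atol=1e-9):
    """Rescale U so that U @ U.conj().T equals the identity."""
    prod = U @ U.conj().T
    coeff = np.trace(prod).real / prod.shape[0]
    # The rescaling only works if prod is proportional to the identity.
    assert np.allclose(prod, coeff * np.eye(prod.shape[0]), atol=atol)
    return U / np.sqrt(coeff)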
Ejemplo n.º 60
0
def plot_mohr3d(S, pf, sigma, tau, colors, mu=0.45, ax=None, vrange=(0, 70)):
    r"""Plot 3D Mohr circles."""

    # eigvalsh returns eigenvalues in ascending order: S3 <= S2 <= S1
    S3, S2, S1 = eigvalsh(S)
    # Convert to effective stresses by subtracting the pore-fluid pressure
    S1 -= pf
    S2 -= pf
    S3 -= pf
    R_maj = 0.5 * (S1 - S3)
    cent_maj = 0.5 * (S1 + S3)

    R_min = 0.5 * (S2 - S3)
    cent_min = 0.5 * (S2 + S3)

    R_mid = 0.5 * (S1 - S2)
    cent_mid = 0.5 * (S1 + S2)
    circ1 = plt.Circle((cent_maj, 0),
                       R_maj,
                       facecolor='steelblue',
                       lw=0,
                       alpha=0.5,
                       zorder=0)
    circ2 = plt.Circle((cent_min, 0),
                       R_min,
                       facecolor='steelblue',
                       lw=0,
                       alpha=0.5,
                       zorder=0)
    circ3 = plt.Circle((cent_mid, 0),
                       R_mid,
                       facecolor='steelblue',
                       lw=0,
                       alpha=0.5,
                       zorder=0)
    if not ax:
        fig, ax = plt.subplots()
    ax.add_artist(circ1)
    ax.add_artist(circ2)
    ax.add_artist(circ3)
    # Plot failure criterion
    x = np.arange(10)
    y = x * mu
    ax.plot(x, y, color='k', linestyle=':', linewidth=1.5, alpha=0.5)
    # Plot fractures
    pts = ax.scatter(sigma,
                     tau,
                     c=colors,
                     label='Fractures',
                     s=30,
                     alpha=0.7,
                     cmap='magma_r',
                     vmin=vrange[0],
                     vmax=vrange[1],
                     edgecolor='k',
                     linewidth=0.5)
    ax.set_xlim(0, 6)
    ax.set_ylim(0., 1.1 * R_maj)
    ax.set_aspect('equal')
    ax.set_xlabel(r"$\sigma$ [MPa]", size=14)
    ax.set_ylabel(r"$\tau$ [MPa]", size=14)
    ax.spines['right'].set_visible(False)
    ax.spines['top'].set_visible(False)
    return pts
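# A hedged usage sketch for plot_mohr3d; the stress tensor, pore pressure,
# and fracture data below are made-up values for illustration only.
import numpy as np
import matplotlib.pyplot as plt

S = np.array([[5.0, 0.3, 0.2],
              [0.3, 3.5, 0.1],
              [0.2, 0.1, 2.0]])        # symmetric total stress tensor [MPa]
pf = 1.0                               # pore-fluid pressure [MPa]
sigma_n = np.array([2.1, 2.8, 3.4])    # effective normal stresses [MPa]
tau = np.array([0.4, 0.9, 0.6])        # shear stresses [MPa]
colors = np.array([10.0, 35.0, 60.0])  # hypothetical color values in vrange

pts = plot_mohr3d(S, pf, sigma_n, tau, colors, mu=0.6)
plt.colorbar(pts, label='Hypothetical color quantity')
plt.show()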