Example #1
File: common.py Project: wathen/PhD
def BlockPerm(ordering, FS):
    Nu = FS['Velocity'].dim()
    Np = FS['Pressure'].dim()
    Nb = FS['Magnetic'].dim()
    Nr = FS['Multiplier'].dim()
    u = identity(Nu)
    p = identity(Np)
    b = identity(Nb)
    r = identity(Nr)

    if ordering == ['u', 'b', 'p', 'r']:
        A = bmat([[u, None, None, None],
                  [None, None, p, None],
                  [None, b, None, None],
                  [None, None, None, r]])
    # elif ordering == ['b', 'u', 'p', 'r']:
    #     A = bmat([[None, u, None, None],
    #             [b, None, None, None]
    #             [None, None, p, None]
    #             [None, None, None, r]])
    # elif ordering == ['b', 'u', 'r', 'p']:
    #     A = bmat([[None, u, None, None],
    #             [b, None, None, None]
    #             [None, None, None, r]
    #             [None, None, p, None]])
    # elif ordering == ['u', 'b', 'r', 'p']:
    #     A = bmat([[u, None, None, None],
    #             [None, None, b, r]
    #             [None, b, None, None]
    #             [None, None, p, None]])


    return A
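
A minimal usage sketch for BlockPerm, assuming the corrected version above (which returns A) with identity and bmat imported from scipy.sparse; the _Space class is a hypothetical stand-in for the FEniCS function spaces whose dim() is queried:

import numpy as np
from scipy.sparse import identity, bmat

class _Space:
    # hypothetical stand-in for a function space exposing dim()
    def __init__(self, n):
        self._n = n
    def dim(self):
        return self._n

FS = {'Velocity': _Space(3), 'Pressure': _Space(2),
      'Magnetic': _Space(2), 'Multiplier': _Space(1)}
A = BlockPerm(['u', 'b', 'p', 'r'], FS)
x = np.arange(8)   # input blocks: u(3), b(2), p(2), r(1)
print(A @ x)       # [0. 1. 2. 5. 6. 3. 4. 7.] -- the p and b blocks swap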
Example #2
def test_get_representations():

    model = LightFM(random_state=SEED)
    model.fit_partial(train, epochs=10)

    num_users, num_items = train.shape

    for (item_features, user_features) in (
        (None, None),
        (
            (sp.identity(num_items) + sp.random(num_items, num_items)),
            (sp.identity(num_users) + sp.random(num_users, num_users)),
        ),
    ):

        test_predictions = model.predict(
            test.row, test.col, user_features=user_features, item_features=item_features
        )

        item_biases, item_latent = model.get_item_representations(item_features)
        user_biases, user_latent = model.get_user_representations(user_features)

        assert item_latent.dtype == np.float32
        assert user_latent.dtype == np.float32

        predictions = (
            (user_latent[test.row] * item_latent[test.col]).sum(axis=1)
            + user_biases[test.row]
            + item_biases[test.col]
        )

        assert np.allclose(test_predictions, predictions, atol=0.000001)
Example #3
    def update(self):
        """Update basis and mixture matrix based on Euclidean distance multiplicative update rules."""
        # self.H = multiply(
        #     self.H, elop(dot(self.W.T, self.V), dot(self.W.T, dot(self.W, self.H)), div))
        # self.W = multiply(
        #     self.W, elop(dot(self.V, self.H.T), dot(self.W, dot(self.H, self.H.T)), div))
        rank = self.H.shape[0]

        self.W = self.W.tolil()
        for row, cols in self.row_to_cols.items():
            Hsub = self.H.tocsc()[:,cols]
            E = sp.identity(rank, dtype=np.float64)
            A = dot(Hsub, Hsub.T) + self.lambda_var * len(cols) * E
            Ainv = sp.csr_matrix(li.inv(A.toarray()), dtype=np.float64)
            Vsub = self.V.tocsr()[[row],:].tocsc()[:,cols]
            self.W[row,:] = dot(dot(Ainv, Hsub), Vsub.T).T
        self.W = self.W.tocsr()

        self.H = self.H.tolil()
        for col, rows in self.col_to_rows.items():
            Wsub = self.W.tocsr()[rows,:]
            E = sp.identity(rank, dtype=np.float64)
            A = dot(Wsub.T, Wsub) + self.lambda_var * len(rows) * E
            Ainv = sp.csr_matrix(li.inv(A.toarray()), dtype=np.float64)
            Vsub = self.V.tocsr()[rows,:].tocsc()[:,[col]]
            self.H[:,col] = dot(dot(Ainv, Wsub.T), Vsub)
        self.H = self.H.tocsr()
Example #4
	def __init__(self,lhgen,rhgen,target_sector=0.,joint=True):
		self.lhgen=deepcopy(lhgen)
		self.rhgen=deepcopy(rhgen)
		self.L=self.lhgen.l+self.rhgen.l
		self.H=kron(self.lhgen.H,identity(self.rhgen.D))+kron(identity(self.lhgen.D),self.rhgen.H)
		if joint==True:
			for lpterm in self.lhgen.pterms:
				for rpterm in self.rhgen.pterms:
					if lpterm.label==rpterm.label: #label must include all the important information
						self.H=self.H+kron(lpterm.current_op.mat,rpterm.current_op.mat)*lpterm.param

		self.target_sector=target_sector
		self.sector_indices={}
		self.rsector_indices={}
		self.restricted_basis_indices=[]
		for sys_sec,sys_basis_states in self.lhgen.basis_by_sector.items():
			self.sector_indices[sys_sec]=[]
			env_sec=target_sector-sys_sec
			if env_sec in self.rhgen.basis_by_sector:
				self.rsector_indices[env_sec]=[]
				for i in sys_basis_states:
					i_offset=self.rhgen.D*i
					for j in self.rhgen.basis_by_sector[env_sec]:
						current_index=len(self.restricted_basis_indices)
						self.sector_indices[sys_sec].append(current_index)
						self.rsector_indices[env_sec].append(current_index)
						self.restricted_basis_indices.append(i_offset+j)
		self.restricted_superblock_hamiltonian=self.H.todense()[:,self.restricted_basis_indices][self.restricted_basis_indices,:]
Example #5
def get_prediction_matrices(adj_matrix, discount_factor, max_cycle_order):
  if max_cycle_order == np.inf: #compute signed Katz measure
    if type(discount_factor) is not float:
      raise ValueError("discount factor must be float") #TODO must be sufficiently small (< ||A||_2) too? 
    prediction_matrix = sp.identity(adj_matrix.shape[0])
    prediction_matrix = prediction_matrix - discount_factor * adj_matrix
    prediction_matrix = sp.linalg.inv(prediction_matrix)
    prediction_matrix = prediction_matrix - sp.identity(adj_matrix.shape[0])
    prediction_matrix = prediction_matrix - discount_factor * adj_matrix
    return prediction_matrix
  else:
    if type(discount_factor) is not list:
      raise ValueError("for finite max cycle order, must provide list of discount factors for each cycle order between 3 and max")

    #Load in products of adjacency matrix of power up to max cycle order
    #Compute if needed
    products = list()
    current_product = sp.csr_matrix(adj_matrix)
    order = 3
    while order <= max_cycle_order:
      current_product = current_product.dot(adj_matrix) #compute next higher power product
      products.append(current_product) #add this to our list of products used to compute MOI
      order += 1
    return products
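
A quick sketch of the infinite-order (signed Katz) branch, assuming get_prediction_matrices as defined above with numpy imported as np and scipy.sparse as sp (scipy.sparse.linalg must also be imported so sp.linalg.inv resolves):

import numpy as np
import scipy.sparse as sp
import scipy.sparse.linalg   # makes sp.linalg available

# hypothetical signed adjacency matrix of a 3-node network
adj = sp.csr_matrix(np.array([[0., 1., -1.],
                              [1., 0., 1.],
                              [-1., 1., 0.]]))
katz = get_prediction_matrices(adj, discount_factor=0.1, max_cycle_order=np.inf)
# katz approximates the sum over k >= 2 of beta^k * A^k, a signed Katz score per node pair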
Example #6
    def _construct_feature_matrices(self, n_users, n_items, user_features,
                                    item_features):

        if user_features is None:
            user_features = sp.identity(n_users,
                                        dtype=CYTHON_DTYPE,
                                        format='csr')
        else:
            user_features = user_features.tocsr()

        if item_features is None:
            item_features = sp.identity(n_items,
                                        dtype=CYTHON_DTYPE,
                                        format='csr')
        else:
            item_features = item_features.tocsr()

        if n_users > user_features.shape[0]:
            raise Exception('Number of user feature rows does not equal '
                            'the number of users')

        if n_items > item_features.shape[0]:
            raise Exception('Number of item feature rows does not equal '
                            'the number of items')

        # If we already have embeddings, verify that
        # we have them for all the supplied features
        if self.user_embeddings is not None:
            assert self.user_embeddings.shape[0] >= user_features.shape[1]

        if self.item_embeddings is not None:
            assert self.item_embeddings.shape[0] >= item_features.shape[1]

        return user_features, item_features
Example #7
def enlarge_block(block):
    """This function enlarges the provided Block by a single site, returning an
    EnlargedBlock.
    """
    mblock = block.basis_size
    o = block.operator_dict

    # Create the new operators for the enlarged block.  Our basis becomes a
    # Kronecker product of the Block basis and the single-site basis.  NOTE:
    # `kron` uses the tensor product convention making blocks of the second
    # array scaled by the first.  As such, we adopt this convention for
    # Kronecker products throughout the code.
    enlarged_operator_dict = {
        "H": kron(o["H"], identity(model_d)) + kron(identity(mblock), H1) + H2(o["conn_Sz"], o["conn_Sp"], Sz1, Sp1),
        "conn_Sz": kron(identity(mblock), Sz1),
        "conn_Sp": kron(identity(mblock), Sp1),
    }

    # This array keeps track of which sector each element of the new basis is
    # in.  `np.add.outer()` creates a matrix that adds each element of the
    # first vector with each element of the second, which when flattened
    # contains the sector of each basis element in the above Kronecker product.
    enlarged_basis_sector_array = np.add.outer(block.basis_sector_array, single_site_sectors).flatten()

    return EnlargedBlock(length=(block.length + 1),
                         basis_size=(block.basis_size * model_d),
                         operator_dict=enlarged_operator_dict,
                         basis_sector_array=enlarged_basis_sector_array)
Example #8
def _get_feature_matrices(interactions):

    no_users, no_items = interactions.shape

    user_features = sp.identity(no_users, dtype=np.int32).tocsr()
    item_features = sp.identity(no_items, dtype=np.int32).tocsr()

    return (user_features, item_features)
Example #9
def sfermion(L):
	H=np.zeros((4,4))
	d=1
	for i in range(L-1):
		H = (kron(H, identity(4))
		     + t*kron(kron(identity(d), Cp_up.dot(Zs)), Cm_up)
		     - t*kron(kron(identity(d), Cm_up.dot(Zs)), Cp_up)
		     + t*kron(kron(identity(d), Cp_dn.dot(Zs)), Cm_dn)
		     - t*kron(kron(identity(d), Cm_dn.dot(Zs)), Cp_dn))
		d*=4
	E0,psi0=eigsh(H,k=1,which='SA')
	print(E0 / L)
Example #10
	def __init__(self,lhgen,rhgen):
		self.lhgen=deepcopy(lhgen)
		self.rhgen=deepcopy(rhgen)
		self.H=kron(self.lhgen.H,identity(self.rhgen.D))+kron(identity(self.lhgen.D),self.rhgen.H)
		for lpterm in self.lhgen.part_terms:
			for rpterm in self.rhgen.part_terms:
				if all(lpterm.label)==all(rpterm.label) and (lpterm.site1+self.rhgen.N-self.lhgen.l-self.rhgen.l,lpterm.site2+self.rhgen.N-self.lhgen.l-self.rhgen.l)==(rpterm.site1,rpterm.site2): 
					#this doesn't work in mirror image
					self.H=self.H+kron(lpterm.op1,rpterm.op2)*lpterm.param
Example #11
 def fit(self, A, u):
     n = A.shape[0]
     D = sparse.diags(np.asarray(A.sum(axis=1).T), [0])
     L = D - A
     # self.f = la.spsolve(self.beta*L/(n**2)+self.alpha*sparse.identity(n), u) # <- this is the correct formula,
     self.f = la.spsolve(self.beta*L+self.alpha*sparse.identity(n), u) # but we use this one for convenience
     self.f = np.maximum(0.0, self.f)
     return self
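
The core computation above, as a self-contained sketch on a hypothetical 4-node path graph with assumed alpha=0.1 and beta=0.5 (solve (beta*L + alpha*I) f = u, then clamp to non-negative values):

import numpy as np
import scipy.sparse as sparse
import scipy.sparse.linalg as la

A = sparse.csr_matrix(np.array([[0., 1., 0., 0.],
                                [1., 0., 1., 0.],
                                [0., 1., 0., 1.],
                                [0., 0., 1., 0.]]))
n = A.shape[0]
D = sparse.diags(np.asarray(A.sum(axis=1)).ravel(), 0)
L = D - A
u = np.array([1., 0., 0., 0.])   # label signal to diffuse over the graph
f = la.spsolve(0.5 * L + 0.1 * sparse.identity(n), u)
f = np.maximum(0.0, f)           # clamp, as in fit() above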
Example #12
def _jdeq_solve(A,Q,MQ,iKMQ,r,M,invK,F,shift,tol,method='lgmres',precon=False,maxiter=20):
    '''
    Solve Jacobi-Davidson's correction equation.
        (I-Q*Q.H)(A-theta*I)(I-Q*Q.H)z = -r, with z.T*Q = 0

    Parameters:
        :A: matrix(N,N), the matrix.
        :Q,MQ,iKMQ: matrix(N,k), Q,M*Q and K^-1*M*Q, Q is the search space, z is orthogonal to Q.
        :r: 1D array, the residual vector.
        :M: matrix(N,N), the preconditioner for JD.
        :invK: matrix(N,N), part of preconditioner for this solver.
        :F: matrix(k,k), the space of converged eigenvectors.
        :shift: float, the desired eigenvalue region.
        :tol: float, the tolerance.
        :method: str, the method to solve linear equation.

            * 'gmres'/'lgmres', generalized minimal residual iteration.
            * 'cg'/'bicg'/'bicgstab', (bi)conjugate gradient method.
            * unused methods: `minres` (minimum residual) and `qmr` (quasi-minimum residual) are designed for real symmetric matrices,\
                    `cgs` and `symmlq` are excluded for the same reason.\
                    `lsqr`/`lsmr` (least-squares methods) perform poorly here.
        :precon: bool, use preconditioner if True.

    Return:
        1D array, the result z.
    '''
    N,k=MQ.shape
    MQH=MQ.T.conj()
    MAT=A-shift*(M if M is not None else sps.identity(N))
    invF=inv(F)
    x0=random.random(N)*(tol/N)
    if not precon:
        if invK is not None:
            MAT=(invK,MAT)
            r=invK.dot(r)
        system_matrix=_JDLinear((iKMQ.dot(invF),MQH),MAT)
        precon=None
        right_hand_side=-r+iKMQ.dot(invF).dot(MQH.dot(r))
    else:
        QH=Q.T.conj()
        system_matrix=_JDLinear((MQ,QH),MAT)
        precon=_JDLinear((iKMQ.dot(invF),MQH),invK if invK is not None else sps.identity(N))
        right_hand_side=-r+Q.dot(QH.dot(r))
    if method=='cg' or method=='bicg' or method=='bicgstab':
        if method=='cg': solver=lin.cg 
        elif method=='bicg': solver=lin.bicg
        else: solver=lin.bicgstab
        z,info=solver(system_matrix,right_hand_side,tol=tol,M=precon,maxiter=maxiter,x0=x0)
    elif method=='gmres' or method=='lgmres':
        if method=='gmres': solver=lin.gmres
        else: solver=lin.lgmres
        z,info=solver(system_matrix,right_hand_side,tol=tol,M=precon,maxiter=maxiter,x0=x0)
    else:
        raise Exception('Unknown method for linear solver %s'%method)
    return z
Example #13
	def check_cano(self):
		if self.mps.cano=='left':
			div=self.mps.L
		elif self.mps.cano=='right':
			div=0
		else:
			div=self.mps.div
		for M in self.mps.Ms[:div]:
			print(np.tensordot(M.conjugate().transpose(), M, axes=([1,2],[1,0])) - identity(M.shape[2]))
		for M in self.mps.Ms[div+1:self.mps.L]:
			print(np.tensordot(M, M.conjugate().transpose(), axes=([1,2],[1,0])) - identity(M.shape[0]))
Example #14
 def timeEvolution(self,tau,duration):
   self.duration = duration
   # Define A and B matrices
   A = sp.identity(self.gridLength) - tau/(2j)*self.H
   B = sp.identity(self.gridLength) + tau/(2j)*self.H
   self.time_evolved_psi = np.zeros((self.gridLength,duration),dtype=complex)
   # Start time evolution
   for i in range(0,duration):
     self.time_evolved_psi[:,i],_ = linalg.bicgstab(A,B.dot(self.psi).transpose(),tol=1e-10)
     self.psi = self.time_evolved_psi[:,i]
   if self.potentialName.lower() == "rectangular barrier":
     return np.trapz(np.abs(self.psi[self.barrierEnd:])**2)
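
The stepper above is Crank-Nicolson: each step solves (I - i*tau/2*H) psi_new = (I + i*tau/2*H) psi_old. A self-contained single step with an assumed free-particle finite-difference Hamiltonian (all parameters hypothetical):

import numpy as np
import scipy.sparse as sp
from scipy.sparse import linalg

n, dx, tau = 200, 0.1, 0.01
# H = -(1/2) d^2/dx^2 via central differences
main = np.full(n, 1.0 / dx**2)
off = np.full(n - 1, -0.5 / dx**2)
H = sp.diags([off, main, off], [-1, 0, 1])
A = sp.identity(n) - tau / (2j) * H
B = sp.identity(n) + tau / (2j) * H
x = np.linspace(-10, 10, n)
psi = np.exp(-x**2).astype(complex)   # Gaussian initial wave packet
psi, info = linalg.bicgstab(A.tocsr(), B.dot(psi))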
Example #15
	def get_proj(self,dtype,full_1=True,full_2=True):
		if full_1:
			proj1 = self._b1.get_proj(dtype)
		else:
			proj1 = _sp.identity(self._b1.Ns,dtype=dtype)

		if full_2:
			proj2 = self._b2.get_proj(dtype)
		else:
			proj2 = _sp.identity(self._b2.Ns,dtype=dtype)


		return _sp.kron(proj1,proj2,format="csr")
Example #16
    def get_cochain_basis(self, dimension, is_primal=True):
        N = self.complex_dimension()

        if not 0 <= dimension <= N:
            raise ValueError('invalid dimension (%d)' % dimension)
        
        c   = cochain(self, dimension, is_primal)        
        
        if is_primal:
            c.v = sparse.identity(self[dimension].num_simplices)
        else:
            c.v = sparse.identity(self[N - dimension].num_simplices)
        
        return c   
Example #17
    def _construct_feature_matrices(
        self, n_users, n_items, user_features, item_features
    ):

        if user_features is None:
            user_features = sp.identity(n_users, dtype=CYTHON_DTYPE, format="csr")
        else:
            user_features = user_features.tocsr()

        if item_features is None:
            item_features = sp.identity(n_items, dtype=CYTHON_DTYPE, format="csr")
        else:
            item_features = item_features.tocsr()

        if n_users > user_features.shape[0]:
            raise Exception(
                "Number of user feature rows does not equal " "the number of users"
            )

        if n_items > item_features.shape[0]:
            raise Exception(
                "Number of item feature rows does not equal " "the number of items"
            )

        # If we already have embeddings, verify that
        # we have them for all the supplied features
        if self.user_embeddings is not None:
            if not self.user_embeddings.shape[0] >= user_features.shape[1]:
                raise ValueError(
                    "The user feature matrix specifies more "
                    "features than there are estimated "
                    "feature embeddings: {} vs {}.".format(
                        self.user_embeddings.shape[0], user_features.shape[1]
                    )
                )

        if self.item_embeddings is not None:
            if not self.item_embeddings.shape[0] >= item_features.shape[1]:
                raise ValueError(
                    "The item feature matrix specifies more "
                    "features than there are estimated "
                    "feature embeddings: {} vs {}.".format(
                        self.item_embeddings.shape[0], item_features.shape[1]
                    )
                )

        user_features = self._to_cython_dtype(user_features)
        item_features = self._to_cython_dtype(item_features)

        return user_features, item_features
Example #18
 def construct_superblock_hamiltonian(self, sys_enl, env_enl):
     sys_enl_op = sys_enl.operator_dict
     env_enl_op = env_enl.operator_dict
     if self.boundary_condition == open_bc:
         # L**R
         H_int = self.H2(sys_enl_op["conn_b"], env_enl_op["conn_b"])
     else:
         assert self.boundary_condition == periodic_bc
         # L*R*
         H_int = (self.H2(sys_enl_op["r_b"], env_enl_op["l_b"]) +
                  self.H2(sys_enl_op["l_b"], env_enl_op["r_b"]))
     return (kron(sys_enl_op["H"], identity(env_enl.basis_size)) +
             kron(identity(sys_enl.basis_size), env_enl_op["H"]) +
             H_int)
Example #19
    def solve_linear_system(self, A=None, use_Wendland_compsupp=True, regularisation=None, save_Gram_rows=[False,None],
                            verbose=False):
        """
        Solves the linear system defined by the generalised interpolation problem
        :param A: Interpolation matrix
        :param use_Wendland_compsupp: When A is not given and the kernel is Wendland kernel, use the
        compact support to speed up population of interpolation matrix.
        :param save_Gram_rows: Option to save Gram matrix row-by-row - 2nd list element is for path
                            (currently only for gram_Wendland())
        :return: solution vector stored in self.coefficients
        """
        #TODO: Implement save_Gram_rows for other gram functions
        if regularisation is not None:
            self.regularisation=regularisation

        # Check the Data list is not empty
        data_empty = True
        for data_object in self.data_points:
            if data_object.numpoints > 0: data_empty = False
        if data_empty:
            print("ERROR: Cannot solve linear system - no data points")
            exit(1)
        self._check_for_empty_Data()
        if A is None:
            if verbose: print('making Gram matrix')
            if not use_Wendland_compsupp or not isinstance(self.K, Wendland):
                if verbose: print('using gram')
                self.gram = gram(self)
            else:
                if verbose: print('using gram_Wendland')
                self.gram = gram_Wendland(self, save_rows=save_Gram_rows)
            if verbose: print('finished making Gram matrix')
        else:
            self._check_gram(A)
            self.gram = A
        #if self.regularisation != 0:
        #    print('Adding regularisation: ', self.regularisation)
        #    self.gram = self.gram + (self.regularisation * sparse.identity(self.gram.shape[0]))
        if verbose: print('solving linear system')
        self.beta = np.array([])
        for data_object in self.data_points:
            #self.dim = data_object.points.shape[0]
            self.beta = np.hstack((self.beta, data_object.targets))
        if sparse.issparse(self.gram):
            if not isinstance(self.gram, sparse.csr.csr_matrix):
                self.gram = sparse.csr_matrix(self.gram)
            self.coefficients = spsolve(self.gram + (self.regularisation * sparse.identity(self.gram.shape[0])), self.beta)
        else:
            self.coefficients = np.linalg.solve(self.gram + (self.regularisation * sparse.identity(self.gram.shape[0])), self.beta)
        if verbose: print('finished linear system')
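
The essential step is a Tikhonov-regularised solve of the Gram system, (G + lambda*I) c = beta. A compact sketch with hypothetical random data standing in for a real Gram matrix:

import numpy as np
from scipy import sparse
from scipy.sparse.linalg import spsolve

n = 100
G = sparse.random(n, n, density=0.05, format="csr")
G = G + G.T + 10 * sparse.identity(n)   # symmetrised, diagonally boosted stand-in
beta = np.random.randn(n)
lam = 1e-8
coefficients = spsolve((G + lam * sparse.identity(n)).tocsr(), beta)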
Example #20
def generate_prefix_dyn_cstr(G, T, init):
    """Generate equalities (47c), (47e) for prefix dynamics"""
    K = G.K()
    M = G.M()

    # variables: u[0], ..., u[T-1], x[1], ..., x[T]

    # Obtain system matrix
    B = G.system_matrix()

    # (47c)
    # T*K equalities
    A_eq1_u = sp.block_diag((B,) * T)
    A_eq1_x = sp.block_diag((sp.identity(K),) * T)
    b_eq1 = np.zeros(T * K)

    # (47e)
    # T*K equalities
    A_eq2_u = sp.block_diag((_id_stacked(K, M),) * T)
    A_eq2_x = sp.bmat([[sp.coo_matrix((K, K * (T - 1))),
                        sp.coo_matrix((K, K))],
                       [sp.block_diag((sp.identity(K),) * (T - 1)),
                        sp.coo_matrix((K * (T - 1), K))]
                       ])
    b_eq2 = np.hstack([init, np.zeros((T - 1) * K)])

    # Forbid non-existent modes
    # T * len(ban_idx) equalities
    ban_idx = [G.order_fcn(v) + m * K
               for v in G.nodes_iter()
               for m in range(M)
               if G.mode(m) not in G.node_modes(v)]
    A_eq3_u_part = sp.coo_matrix(
        (np.ones(len(ban_idx)), (range(len(ban_idx)), ban_idx)),
        shape=(len(ban_idx), K * M)
    )
    A_eq3_u = sp.block_diag((A_eq3_u_part,) * T)
    A_eq3_x = sp.coo_matrix((T * len(ban_idx), T * K))
    b_eq3 = np.zeros(T * len(ban_idx))

    # Stack everything
    A_eq_u = sp.bmat([[A_eq1_u],
                     [A_eq2_u],
                     [A_eq3_u]])
    A_eq_x = sp.bmat([[-A_eq1_x],
                      [-A_eq2_x],
                      [A_eq3_x]])
    b_eq = np.hstack([b_eq1, b_eq2, b_eq3])

    return A_eq_u, A_eq_x, b_eq
Example #21
def create_lp_matrix(A, min_reviewers_per_paper=0, max_reviewers_per_paper=10,
                        min_papers_per_reviewer=0, max_papers_per_reviewer=10):
    """
    The problem formulation of paper-reviewer matching problem is as follow:
    we want to maximize this cost function with constraint

        maximize A.T * b
        subject to N_p * b <= c_p (c_p = maximum number of reviewer per paper)
                   N_r * b <= c_r (c_r = maximum number of paper per reviewer)
                   b <= 1
                   b >= 0

    This problem can be reformulated as
        maximize A.T * b
        subject to K * b <= d
        where K = [N_e; N_p; N_r; I; -I] stacks the node-edge adjacency rows with
        the box constraints 0 <= b <= 1, matching the code below

    where A is an affinity matrix (e.g. topic distance matrix)
          K is built from the node-edge adjacency matrix N_e and the min-count rows N_p, N_r
          d is the column vector of the corresponding bounds

    Reference
    ---------
    Taylor, Camillo J. "On the optimal assignment of conference papers to reviewers." (2008).
    """
    n_papers, n_reviewers = A.shape
    n_edges = np.count_nonzero(A)

    i, j = A.nonzero()
    v = A[i, j]

    N_e = sp.dok_matrix((n_papers + n_reviewers, n_edges), dtype=float)
    N_e[i, range(n_edges)] = 1
    N_e[j + n_papers, range(n_edges)] = 1

    N_p = sp.dok_matrix((n_papers, n_edges), dtype=int)
    N_p[i, range(n_edges)] = -1

    N_r = sp.dok_matrix((n_reviewers, n_edges), dtype=int)
    N_r[j, range(n_edges)] = -1

    K = sp.vstack([N_e, N_p, N_r, sp.identity(n_edges), -sp.identity(n_edges)])

    d = [max_reviewers_per_paper] * n_papers + [max_papers_per_reviewer] * n_reviewers + \
        [-min_reviewers_per_paper] * n_papers + [-min_papers_per_reviewer] * n_reviewers + \
        [1] * n_edges + [0] * n_edges
    d = np.atleast_2d(d).T # column constraint vector

    return v, K, d
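
A hypothetical driver, assuming create_lp_matrix above (with its numpy/scipy.sparse imports in scope); scipy.optimize.linprog minimises, hence the negated objective:

import numpy as np
from scipy.optimize import linprog

affinity = np.array([[0.9, 0.1],
                     [0.2, 0.8]])   # 2 papers x 2 reviewers
v, K, d = create_lp_matrix(affinity,
                           max_reviewers_per_paper=1,
                           max_papers_per_reviewer=1)
res = linprog(-np.asarray(v).ravel(), A_ub=K, b_ub=d.ravel(), method="highs")
assignment = (res.x > 0.5).astype(int)   # round the relaxed solution; expect papers 0->0, 1->1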
Example #22
def makePropertyTensor(M, tensor):
    if tensor is None:  # default is ones
        tensor = np.ones(M.nC)

    if isScalar(tensor):
        tensor = tensor * np.ones(M.nC)

    propType = TensorType(M, tensor)
    if propType == 1: # Isotropic!
        Sigma = sp.kron(sp.identity(M.dim), sdiag(mkvc(tensor)))
    elif propType == 2: # Diagonal tensor
        Sigma = sdiag(mkvc(tensor))
    elif M.dim == 2 and tensor.size == M.nC*3:  # Fully anisotropic, 2D
        tensor = tensor.reshape((M.nC,3), order='F')
        row1 = sp.hstack((sdiag(tensor[:, 0]), sdiag(tensor[:, 2])))
        row2 = sp.hstack((sdiag(tensor[:, 2]), sdiag(tensor[:, 1])))
        Sigma = sp.vstack((row1, row2))
    elif M.dim == 3 and tensor.size == M.nC*6:  # Fully anisotropic, 3D
        tensor = tensor.reshape((M.nC,6), order='F')
        row1 = sp.hstack((sdiag(tensor[:, 0]), sdiag(tensor[:, 3]), sdiag(tensor[:, 4])))
        row2 = sp.hstack((sdiag(tensor[:, 3]), sdiag(tensor[:, 1]), sdiag(tensor[:, 5])))
        row3 = sp.hstack((sdiag(tensor[:, 4]), sdiag(tensor[:, 5]), sdiag(tensor[:, 2])))
        Sigma = sp.vstack((row1, row2, row3))
    else:
        raise Exception('Unexpected shape of tensor')

    return Sigma
Example #23
def solve_constrained(segments, keyframes, alpha, t_lambda):
    n_constraints_per = 2
    n_constraints = n_constraints_per * (len(keyframes) - 2)

    # Create massive matrices
    r14, r23  = create_rs(segments, n_constraints)
    ps        = create_ps(segments, n_constraints)
    h14       = create_hs(segments)

    # formulate b
    b = ps - r14 * h14

    # add constraints to r23 and b
    nrows = sum([2 * len(seg) for seg in segments]) + n_constraints
    ncols = 4 * len(segments)
    c_start = sum([2 * len(seg) for seg in segments])
    if n_constraints != 0:
        for i in range(n_constraints // n_constraints_per):
            col = ((1 + i) * 4) - 1
            index_cx_left  = col - 2
            index_cx_right = col + 1
            index_cy_left  = col + 0
            index_cy_right = col + 3

            # Colinear constraints for r23
            c1 = c_start + i * n_constraints_per
            c2 = c_start + i * n_constraints_per + 1
            vs = [alpha, alpha, alpha, alpha]
            rs = [c1, c1, c2, c2]
            cs = [index_cx_left, index_cx_right, index_cy_left, index_cy_right]
            r23 = r23 + sps.coo_matrix((vs, (rs, cs)), shape = (nrows, ncols))

            # Colinear constraints for b
            p = segments[i][-1]
            b[c_start + i*n_constraints_per  ] = 2 * alpha * p[0]
            b[c_start + i*n_constraints_per+1] = 2 * alpha * p[1]
            # c[c_start + i*n_constraints_per] = 0
            # c[c_start + i*n_constraints_per+1] = 0

    # Log out matrices.
    if LOG_RS:
        log_mtx(r14, "r14")
        log_mtx(r23, "r23")

    #--option A, solve with NO regularizer
    # Solve least squares to calculate handle coordinates (index 0 contains
    # output).
    # return spla.lsqr(r23, b)[0]
    #--option A, end

    #--option B, solve WITH regularizer
    # Solve least squares to calculate handle coordinates (index 0 contains
    # output).
    A = r23.T * r23
    b2 = r23.T * b
    I = sps.identity(A.shape[0])
    res = scipy.sparse.linalg.spsolve(A + t_lambda * I, t_lambda * h14 + b2)

    # res = scipy.sparse.linalg.spsolve(A, b2)
    return res
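
Option B above is a ridge-style solve of the normal equations, (R^T R + lambda*I) x = lambda*h + R^T b. A standalone sketch with hypothetical data:

import numpy as np
import scipy.sparse as sps
import scipy.sparse.linalg

r23 = sps.random(40, 10, density=0.3, format="csr")   # stand-in design matrix
b = np.random.randn(40)
h14 = np.random.randn(10)   # prior coordinates pulled in by the regulariser
t_lambda = 1e-3
A = (r23.T @ r23).tocsr()
b2 = r23.T @ b
I = sps.identity(A.shape[0])
res = scipy.sparse.linalg.spsolve(A + t_lambda * I, t_lambda * h14 + b2)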
Example #24
 def unfold_accessibility(self,return_accessibility_matrix=False):
     """ Unfold accessibility storing path density.
     
     """
     P=self[0].copy()
     D=sp.identity(self.number_of_nodes,dtype=np.int32)
     P=P+D
     cumu=[P.nnz]
     
     for i in range(1,len(self)):
         print('unfolding accessibility. Step', i, 'non-zeros:', P.nnz)
         self.bool_int_matrix(P)
         cumu.append(P.nnz)
         try:
             P=P+P*self[i]
         except Exception:
             print('Break at t =', i)
             break
     else:
         print('---> Unfolding complete.')
                 
     if return_accessibility_matrix:
         P = P.astype('bool')
         P = P.astype('int')
         return P,cumu
     else:
         return cumu
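
The same unfolding sketched without the class machinery: P accumulates every path seen so far and the per-step nonzero count gives the path density (random snapshots stand in for real temporal network data):

import numpy as np
import scipy.sparse as sp

n, steps = 50, 5
snapshots = [(sp.random(n, n, density=0.02, format="csr") > 0).astype(np.int32)
             for _ in range(steps)]
P = (snapshots[0] + sp.identity(n, dtype=np.int32) > 0).astype(np.int32)
cumu = [P.nnz]
for A_t in snapshots[1:]:
    P = ((P + P @ A_t) > 0).astype(np.int32)   # clamp to 0/1, like bool_int_matrix
    cumu.append(P.nnz)
print(cumu)   # monotone path-density curve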
Example #25
 def linrel(self, X_t, y_t, X, mu, c):
     """Linrel algorithm"""
     temp = X_t.T * X_t + mu*identity(X_t.shape[1])
     temp = inv(temp)
     temp = X * temp * X_t.T
     score = (temp*y_t).toarray() + c/2 * numpy.linalg.norm(temp.toarray(), axis=1).reshape((temp.shape[0],1))
     return score
Example #26
 def __init__(self, W):
     self.W = spar.csc_matrix(W)
     self.WW = W.dot(W)
     self.WWW = self.WW.dot(W)
     self.I = spar.identity(W.shape[0]).tocsc()
     self.Id = self.I.toarray()
     self.Wd = self.W.toarray()
Example #27
 def setUp(self):
     n = 50
     nrhs = 20
     self.A = sp.rand(n, n, 0.4) + sp.identity(n)
     self.sol = np.ones((n, nrhs))
     self.rhsU = sp.triu(self.A) * self.sol
     self.rhsL = sp.tril(self.A) * self.sol
Example #28
def annhilation_operators(nr_fermions):
    """Compute the sparse-matrix representations of the annhilators d_j of
    a `nr_fermions` fermion system. Due to the cannonical anticommutator
    relations {d_i, adj(d)_j} = delta_ij, the matrix elements in the basis

                (|0,0,...,0> , |1,0,...,0>, ..., |1,...,1,1>),

    where (let N = `nr_fermions`)

            |n_1,...,n_N> = adj(d)_1^n_1 ... adj(d)_N^n_N |0,0,...,0>,

    have to be calculated as Kronecker products

                d_j = eta * eta * ... * d * I * ... * I.

    Here, I = diag(1, 1); eta = diag(1, -1); and d = ((0, 1), (0, 0)).

    :param nr_fermions: Number of fermions to consider
    :returns: List of length N, where the n-th entry is the sparse matrix
              representation of d_n

    """
    iden = sp.identity(2)
    eta = sp.spdiags([[1, -1]], [0], 2, 2)
    annh = sp.csr_matrix([[0, 1], [0, 0]])
    res = [tensor([eta]*n + [annh] + [iden]*(nr_fermions-1-n), sp.kron).tocsr()
           for n in range(nr_fermions)]

    for A in res:
        A.eliminate_zeros()
    return res
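
A quick sanity check of the canonical anticommutation relations for two fermions, assuming annhilation_operators above is in scope; the tensor helper it calls is not shown, so a plausible reduce-based version is assumed here:

from functools import reduce
import numpy as np
import scipy.sparse as sp

def tensor(ops, kron):
    # assumed helper: fold the Kronecker product over the operator list
    return reduce(kron, ops)

d = annhilation_operators(2)
I4 = sp.identity(4).toarray()
for i in range(2):
    for j in range(2):
        dj_dag = d[j].conj().T.tocsr()
        anti = (d[i] @ dj_dag + dj_dag @ d[i]).toarray()
        expected = I4 if i == j else np.zeros((4, 4))
        assert np.allclose(anti, expected)   # {d_i, adj(d)_j} = delta_ij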
Example #29
    def __solve_by_Newton_method(self):
        tic = time.time()
        s = self.Value_j.nodes
        x = self.Policy_j.y
        ni = self.dims.ni

        if issparse(self.Value_j._Phi):
            Phik = kron(identity(ni), self.Value_j._Phi, format='csr').toarray()
            # Solve = spsolve
        else:
            Phik = np.kron(np.eye(self.dims.ni), self.Value._Phi)
            # Solve = np.linalg.solve

        # todo: fix the dimensions and check that Phik is transposed?

        def SOLVE(A,b):
            return np.linalg.solve(A, b) if (self.dims.ns == self.dims.nc) else np.linalg.lstsq(A, b)[0]


        self.options.print_header("Newton's", self.time.horizon)
        for it in range(self.options.maxit):
            cold = self.Value.c.copy().flatten()
            # print('\ncold', cold)
            self.Value_j[:], vc = self.vmax(s, x, self.Value, True)
            self.make_discrete_choice()
            step = - SOLVE(Phik - vc, Phik @ cold - self.Value.y.flatten())
            c = cold + step
            change = np.linalg.norm(step, np.inf)
            self.Value.c = c.reshape(self.Value.c.shape)
            self.options.print_current_iteration(it, change, tic)
            if np.isnan(change):
                raise ValueError('nan found on Newton iteration')
            if change < self.options.tol:
                break
        self.options.print_last_iteration(tic, change)
Example #30
def policyIter(beta, N, Wmax=1.):
    """
    Solve the infinite horizon cake eating problem using policy function iteration.
    Inputs:
        beta -- float, the discount factor
        N -- integer, size of discrete approximation of cake
        Wmax -- total amount of cake available
    Returns:
        values -- converged value function (Numpy array of length N)
        psi -- converged policy function (Numpy array of length N)
    """
    W = np.linspace(0,Wmax,N) #state space vector
    I = sparse.identity(N, format='csr')
    
    #precompute u(W-W') for all possible inputs
    actions = np.tile(W, N).reshape((N,N)).T
    actions = actions - actions.T
    actions[actions<0] = 0
    rewards = np.sqrt(actions)
    rewards[np.triu_indices(N, k=1)] = -1e10 #pre-computed reward function
    
    psi_ind = np.arange(N)
    rows = np.arange(0,N)
    tol = 1.
    while tol >= 1e-9:
        columns = psi_ind
        data = np.ones(N)
        Q = sparse.coo_matrix((data,(rows,columns)),shape=(N,N))
        Q = Q.tocsr()
        values = linalg.spsolve(I-beta*Q, u(W-W[psi_ind])).reshape(1,N)
        psi_ind1 = np.argmax(rewards + beta*values, axis=1)
        tol = math.sqrt(((W[psi_ind] - W[psi_ind1])**2).sum())
        psi_ind = psi_ind1
    return values.flatten(), W[psi_ind]
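
A hypothetical driver for policyIter; the snippet relies on module-level imports and a utility u, taken here to be the square root so that it matches the precomputed rewards:

import math
import numpy as np
from scipy import sparse
from scipy.sparse import linalg

u = np.sqrt   # assumed utility function
values, psi = policyIter(beta=0.9, N=100)
print(values[-1], psi[-1])   # value and policy at the largest cake size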
Example #31
    def Smoothed(self, lam, Y, D):

        return spsolve((SS.identity(D.T.shape[0]) + lam * D.T * D), Y).T[:, 0]
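
This is a Whittaker-style smoother: z = (I + lambda * D^T D)^{-1} y. A self-contained sketch with a hypothetical second-difference penalty D and a 1-D signal (the method above expects a column matrix, hence its trailing .T[:, 0]):

import numpy as np
import scipy.sparse as SS
from scipy.sparse.linalg import spsolve

n = 200
y = np.sin(np.linspace(0, 3 * np.pi, n)) + 0.1 * np.random.randn(n)
D = SS.diags([1., -2., 1.], [0, 1, 2], shape=(n - 2, n))   # second differences
lam = 100.0
z = spsolve((SS.identity(n) + lam * D.T @ D).tocsr(), y)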
Example #32
    def expm(self,
             psi_0,
             H_time_eval=0.0,
             iterate=False,
             n_jobs=1,
             block_diag=False,
             a=-1j,
             start=None,
             stop=None,
             endpoint=None,
             num=None,
             shift=None):
        """Creates symmetry blocks of the Hamiltonian and then uses them to run `_expm_multiply()` in parallel.
		
		**Arguments NOT described below can be found in the documentation for the `exp_op` class.**

		Examples
		--------

		The example below builds on the code snippet shown in the description of the `block_ops` class.

		.. literalinclude:: ../../doc_examples/block_ops-example.py
			:linenos:
			:language: python
			:lines: 60-67

		Parameters
		-----------
		psi_0 : numpy.ndarray, list, tuple
			Quantum state which is defined on the full Hilbert space of the problem.
			Does not need to obey any sort of symmetry.
		t0 : float
			Initial time to start the evolution at.
		H_time_eval : numpy.ndarray, list
			Times to evaluate the Hamiltonians at when doing the matrix exponentiation. 
		iterate : bool, optional
			Flag to return generator when set to `True`. Otherwise the output is an array of states. 
			Default is 'False'.
		n_jobs : int, optional 
			Number of processes requested for the computation time evolution dynamics. 

			NOTE: one of those processes is used to gather results. For best performance, all blocks 
			should be approximately the same size and `n_jobs-1` must be a common divisor of the number of
			blocks, such that there is roughly an equal workload for each process. Otherwise the computation 
			will be as slow as the slowest process.
		block_diag : bool, optional 
			When set to `True`, this flag puts the Hamiltonian matrices for the separate symmetry blocks
			into a list and then loops over it to do time evolution. When set to `False`, it puts all
			blocks in a single giant sparse block diagonal matrix. Default is `False`.

			This flag is useful if there are a lot of smaller-sized blocks.

		Returns
		--------
		obj
			if `iterate = True`, returns generator which generates the time dependent state in the 
			full H-space basis.

			if `iterate = False`, returns `numpy.ndarray` which has the time-dependent states in the 
			full H-space basis in the rows.

		Raises
		------
		ValueError
			Various `ValueError`s of `exp_op` class.
		RuntimeError
			Terminates when initial state has no projection onto the specified symmetry blocks.

		"""
        from ..operators import hamiltonian

        if iterate:
            if start is None and stop is None:
                raise ValueError(
                    "'iterate' can only be True with time discretization. must specify 'start' and 'stop' points."
                )

            if num is not None:
                if type(num) is not int:
                    raise ValueError("expecting integer for 'num'.")
            else:
                num = 50

            if endpoint is not None:
                if type(endpoint) is not bool:
                    raise ValueError("expecting bool for 'endpoint'.")
            else:
                endpoint = True

        else:
            if start is None and stop is None:
                if num is not None:
                    raise ValueError("unexpected argument 'num'.")
                if endpoint is not None:
                    raise ValueError("unexpected argument 'endpoint'.")
            else:
                if not (_np.isscalar(start) and _np.isscalar(stop)):
                    raise ValueError(
                        "expecting scalar values for 'start' and 'stop'")

                if not (_np.isreal(start) and _np.isreal(stop)):
                    raise ValueError(
                        "expecting real values for 'start' and 'stop'")

                if num is not None:
                    if type(num) is not int:
                        raise ValueError("expecting integer for 'num'.")
                else:
                    num = 50

                if endpoint is not None:
                    if type(endpoint) is not bool:
                        raise ValueError("expecting bool for 'endpoint'.")
                else:
                    endpoint = True

        P = []
        H_list = []
        psi_blocks = []
        for key, b in _iteritems(self._basis_dict):
            p = self._get_P(key)

            if _sp.issparse(psi_0):
                psi = p.H.dot(psi_0).toarray()
            else:
                psi = p.H.dot(psi_0)

            psi = psi.ravel()
            if _np.linalg.norm(psi) > 1000 * _np.finfo(self.dtype).eps:
                psi_blocks.append(psi)
                P.append(p.tocoo())
                H = self._get_H(key)
                H = H(H_time_eval) * a
                if shift is not None:
                    H += a * shift * _sp.identity(b.Ns, dtype=self.dtype)

                H_list.append(H)

        if block_diag and H_list:
            N_H = len(H_list)
            n_pp = N_H // n_jobs
            n_left = n_pp + N_H % n_jobs

            H_list_prime = []
            psi_blocks_prime = []

            psi_block = _np.hstack(psi_blocks[:n_left])
            H_block = _sp.block_diag(H_list[:n_left], format="csr")

            H_list_prime.append(H_block)
            psi_blocks_prime.append(psi_block)

            for i in range(n_jobs - 1):
                i1 = n_left + i * n_pp
                i2 = n_left + (i + 1) * n_pp
                psi_block = _np.hstack(psi_blocks[i1:i2])
                H_block = _sp.block_diag(H_list[i1:i2], format="csr")

                H_list_prime.append(H_block)
                psi_blocks_prime.append(psi_block)

            H_list = H_list_prime
            psi_blocks = psi_blocks_prime

        H_is_complex = _np.iscomplexobj(
            [_np.float32(1.0).astype(H.dtype) for H in H_list])

        if H_list:
            P = _sp.hstack(P, format="csr")
            if iterate:
                return _block_expm_iter(psi_blocks, H_list, P, start, stop,
                                        num, endpoint, n_jobs)
            else:
                ver = [int(v) for v in _scipy.__version__.split(".")]
                if H_is_complex and (start, stop, num, endpoint) != (
                        None, None, None, None) and ver[1] < 19:
                    mats = _block_expm_iter(psi_blocks, H_list, P, start, stop,
                                            num, endpoint, n_jobs)
                    return _np.array([mat for mat in mats]).T
                else:
                    psi_t = _Parallel(n_jobs=n_jobs)(
                        _delayed(_expm_multiply)(H,
                                                 psi,
                                                 start=start,
                                                 stop=stop,
                                                 num=num,
                                                 endpoint=endpoint)
                        for psi, H in _izip(psi_blocks, H_list))
                    psi_t = _np.hstack(psi_t).T
                    psi_t = P.dot(psi_t)
                    return psi_t
        else:
            raise RuntimeError(
                "initial state has no projection on to specified blocks.")
Example #33
def _transition_matrix(G,
                       nodelist=None,
                       weight="weight",
                       walk_type=None,
                       alpha=0.95):
    """Returns the transition matrix of G.

    This is a row stochastic matrix giving the transition probabilities while
    performing a random walk on the graph. Depending on the value of walk_type,
    P can be the transition matrix induced by a random walk, a lazy random walk,
    or a random walk with teleportation (PageRank).

    Parameters
    ----------
    G : DiGraph
       A NetworkX graph

    nodelist : list, optional
       The rows and columns are ordered according to the nodes in nodelist.
       If nodelist is None, then the ordering is produced by G.nodes().

    weight : string or None, optional (default='weight')
       The edge data key used to compute each value in the matrix.
       If None, then each edge has weight 1.

    walk_type : string or None, optional (default=None)
       If None, `P` is selected depending on the properties of the
       graph. Otherwise is one of 'random', 'lazy', or 'pagerank'

    alpha : real
       (1 - alpha) is the teleportation probability used with pagerank

    Returns
    -------
    P : NumPy array
      transition matrix of G.

    Raises
    ------
    NetworkXError
        If walk_type not specified or alpha not in valid range
    """
    import numpy as np
    from scipy.sparse import identity, spdiags

    if walk_type is None:
        if nx.is_strongly_connected(G):
            if nx.is_aperiodic(G):
                walk_type = "random"
            else:
                walk_type = "lazy"
        else:
            walk_type = "pagerank"

    M = nx.to_scipy_sparse_matrix(G,
                                  nodelist=nodelist,
                                  weight=weight,
                                  dtype=float)
    n, m = M.shape
    if walk_type in ["random", "lazy"]:
        DI = spdiags(1.0 / np.array(M.sum(axis=1).flat), [0], n, n)
        if walk_type == "random":
            P = DI * M
        else:
            I = identity(n)
            P = (I + DI * M) / 2.0

    elif walk_type == "pagerank":
        if not (0 < alpha < 1):
            raise nx.NetworkXError("alpha must be between 0 and 1")
        # this is using a dense representation
        M = M.todense()
        # add constant to dangling nodes' row
        dangling = np.where(M.sum(axis=1) == 0)
        for d in dangling[0]:
            M[d] = 1.0 / n
        # normalize
        M = M / M.sum(axis=1)
        P = alpha * M + (1 - alpha) / n
    else:
        raise nx.NetworkXError("walk_type must be random, lazy, or pagerank")

    return P
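
A small usage sketch; since the snippet calls nx.to_scipy_sparse_matrix, it assumes NetworkX < 3.0, where that function still exists:

import networkx as nx

G = nx.DiGraph([(0, 1), (1, 2), (2, 0), (0, 2)])   # strongly connected, aperiodic
P_random = _transition_matrix(G, walk_type="random")
P_pagerank = _transition_matrix(G, walk_type="pagerank", alpha=0.85)
print(P_pagerank.sum(axis=1))   # every row sums to 1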
Example #34
                block_column.append(
                    dok_matrix((int(comb(level_value - 1,
                                         dimension_value - 1)),
                                int(
                                    comb(absorbing_level_value - 1,
                                         dimension_value - 2)))).tocsc())
            else:
                block_column.append(
                    absorption_matrix(level_value, clone_number,
                                      max_level_value, dimension_value,
                                      mu_value, probability_values,
                                      stimulus_value))
        a_matrices[clone_number].append(block_column)

# Calculating the inverses of H matrices, and storing them in inverse order
h_matrices = [identity(d_matrices[-1].shape[0], format="csc")]

for level_order in range(len(d_matrices)):
    gc.collect()
    matrix = identity(b_matrices[-(level_order + 1)].shape[0],
                      format="csc") - b_matrices[-(level_order + 1)].dot(
                          h_matrices[-1].dot(d_matrices[-(level_order + 1)]))
    matrix = np.linalg.inv(matrix.todense())
    h_matrices.append(csc_matrix(matrix))

for clone_number in range(dimension_value):
    for column_number in range(len(a_matrices[clone_number])):
        # Calculating K matrices for the *column_number* column, and storing them in inverse order
        k_matrices = [a_matrices[clone_number][column_number][-1]]
        for level_order in range(
                len(a_matrices[clone_number][column_number]) - 1):
Example #35
    def rdot(self, other, shift=None, **call_kwargs):
        """Right-multiply an operator by matrix exponential.

		Let the matrix exponential object be :math:`\\exp(\\mathcal{O})` and let the operator be :math:`A`.
		Then this function implements:

		.. math::
			A \\exp(\\mathcal{O})

		Notes
		-----
		For `hamiltonian` objects `A`, this function is the same as `A.dot(expO)`.

		Parameters
		-----------
		other : obj
			The operator :math:`A` which multiplies from the left the matrix exponential :math:`\\exp(\\mathcal{O})`.
		shift : scalar
			Shifts operator to be exponentiated by a constant `shift` times the identity matrix: :math:`\\exp(\\mathcal{O} - \\mathrm{shift}\\times\\mathrm{Id})`.
		call_kwargs : obj, optional
			extra keyword arguments which include:
				**time** (*scalar*) - if the operator `O` to be exponentiated is a `hamiltonian` object.
				**pars** (*dict*) - if the operator `O` to be exponentiated is a `quantum_operator` object.
		
		Returns
		--------
		obj
			matrix exponential multiplied by `other` from the left.

		Examples
		---------
		>>> expO = exp_op(O)
		>>> A = exp_op(O,a=2j).get_mat()
		>>> print(expO.rdot(A))
		>>> print(A.dot(expO))
		
		"""

        is_sp = False
        is_ham = False

        if hamiltonian_core.ishamiltonian(other):
            shape = other._shape
            is_ham = True
        elif _sp.issparse(other):
            shape = other.shape
            is_sp = True
        elif other.__class__ in [_np.matrix, _np.ndarray]:
            shape = other.shape
        else:
            other = _np.asanyarray(other)
            shape = other.shape

        if other.ndim not in [1, 2]:
            raise ValueError(
                "Expecting a 1 or 2 dimensional array for 'other'")

        if other.ndim == 2:
            if shape[1] != self.get_shape[0]:
                raise ValueError(
                    "Dimension mismatch between expO: {0} and other: {1}".
                    format(self._O.get_shape, other.shape))
        elif shape[0] != self.get_shape[0]:
            raise ValueError(
                "Dimension mismatch between expO: {0} and other: {1}".format(
                    self._O.get_shape, other.shape))

        if shift is not None:
            M = (self._a *
                 (self.O(**call_kwargs) +
                  shift * _sp.identity(self.Ns, dtype=self.O.dtype))).T
        else:
            M = (self._a * self.O(**call_kwargs)).T

        if self._iterate:
            if is_ham:
                return _hamiltonian_iter_rdot(M, other.T, self._step,
                                              self._grid)
            else:
                return _iter_rdot(M, other.T, self._step, self._grid)
        else:
            if self._grid is None and self._step is None:

                if is_ham:
                    return _hamiltonian_rdot(M, other.T).T
                else:
                    return _expm_multiply(M, other.T).T
            else:
                if is_sp:
                    mats = _iter_rdot(M, other.T, self._step, self._grid)
                    return _np.array([mat for mat in mats])
                elif is_ham:
                    mats = _hamiltonian_iter_rdot(M, other.T, self._step,
                                                  self._grid)
                    return _np.array([mat for mat in mats])
                else:
                    ver = [int(v) for v in scipy.__version__.split(".")]
                    if _np.iscomplexobj(_np.float32(1.0).astype(
                            M.dtype)) and ver[1] < 19:
                        mats = _iter_rdot(M, other.T, self._step, self._grid)
                        return _np.array([mat for mat in mats])
                    else:
                        if other.ndim > 1:
                            return _expm_multiply(
                                M,
                                other.T,
                                start=self._start,
                                stop=self._stop,
                                num=self._num,
                                endpoint=self._endpoint).transpose(0, 2, 1)
                        else:
                            return _expm_multiply(M,
                                                  other.T,
                                                  start=self._start,
                                                  stop=self._stop,
                                                  num=self._num,
                                                  endpoint=self._endpoint)
Example #36
    def set_erro_aprox(self, mesh, SOL_ADM_f):

        ni = mesh.wirebasket_numbers[0][0]
        ares4_tag = self.ares4_tag
        ares6_tag = self.ares6_tag
        ares9_tag = self.ares9_tag
        D1_tag = mesh.tags['d1']
        gids_vert = mesh.mb.tag_get_data(mesh.tags['ID_reord_tag'],
                                         mesh.wirebasket_elems[0][3],
                                         flat=True)
        T = mesh.matrices['Tf']
        vertices = mesh.wirebasket_elems[0][3]

        sol_vers = sp.csc_matrix(SOL_ADM_f[gids_vert]).transpose()
        k1 = np.mean(SOL_ADM_f)
        prol = mesh.matrices['OP1_AMS']
        rest = mesh.matrices['OR1_AMS']
        sol_prol = (prol * sol_vers).transpose().toarray()[0]
        dif = abs(
            (sol_prol - SOL_ADM_f) / np.repeat(k1, len(mesh.all_volumes)))
        GIDs = mesh.mb.tag_get_data(mesh.tags['ID_reord_tag'],
                                    mesh.all_volumes,
                                    flat=True)
        mesh.mb.tag_set_data(ares4_tag, mesh.all_volumes, dif[GIDs])
        try:
            RTP = rest * T * prol
            som_col = np.array(prol.sum(axis=0))[0]
            diag_RTP = abs(RTP[range(len(vertices)),
                               range(len(vertices))].toarray()[0])
            mesh.mb.tag_set_data(ares6_tag, vertices, diag_RTP)
        except:
            import pdb
            pdb.set_trace()

        sol_vers = sol_vers.tocsr()
        ident = sp.identity(len(vertices)).tocsr()
        ident2 = ident.copy()
        delta = 1
        cont = 0

        for v in vertices:
            ident2[cont, cont] += delta
            s_v = ident2 * sol_vers
            s_p = (prol * s_v).transpose().toarray()[0]
            d1 = ((s_p - sol_prol) / sol_prol)[range(ni)]
            d1 = d1.max()
            mesh.mb.tag_set_data(ares9_tag, v, d1)
            ident2 = ident.copy()
            cont += 1

        for m2 in self.meshset_by_L2:
            tem_poço_no_vizinho = False
            meshset_by_L1 = mesh.mb.get_child_meshsets(m2)
            for m1 in meshset_by_L1:
                elem_by_L1 = mesh.mb.get_entities_by_handle(m1)
                ver_1 = mesh.mb.get_entities_by_type_and_tag(
                    m1, types.MBHEX, np.array([D1_tag]), np.array([3]))

                ar6 = float(mesh.mb.tag_get_data(ares6_tag, ver_1))
                mesh.mb.tag_set_data(ares6_tag, elem_by_L1,
                                     np.repeat(ar6, len(elem_by_L1)))
Example #37
    def _fastInnerProduct(
        self, projection_type, model=None, invert_model=False, invert_matrix=False
    ):
        """Fast version of getFaceInnerProduct.
            This does not handle the case of a full tensor property.

        Parameters
        ----------
        model : numpy.ndarray
            material property (tensor properties are possible) at each cell center (nC, (1, 3, or 6))

        projection_type : str
            'edges' or 'faces'

        invert_model : bool
            inverts the material property

        invert_matrix : bool
            inverts the matrix

        Returns
        -------
        (n_faces, n_faces) scipy.sparse.csr_matrix
            M, the inner product matrix

        """
        projection_type = projection_type[0].upper()
        if projection_type not in ["F", "E"]:
            raise ValueError("projection_type must be 'F' for faces or 'E' for edges")

        if model is None:
            model = np.ones(self.nC)

        if invert_model:
            model = 1.0 / model

        if is_scalar(model):
            model = model * np.ones(self.nC)

        # number of elements we are averaging (equals dim for regular
        # meshes, but for cyl, where we use symmetry, it is 1 for edge
        # variables and 2 for face variables)
        if self._meshType == "CYL":
            shape = getattr(self, "vn" + projection_type)
            n_elements = sum([1 if x != 0 else 0 for x in shape])
        else:
            n_elements = self.dim

        # Isotropic? or anisotropic?
        if model.size == self.nC:
            Av = getattr(self, "ave" + projection_type + "2CC")
            Vprop = self.cell_volumes * mkvc(model)
            M = n_elements * sdiag(Av.T * Vprop)

        elif model.size == self.nC * self.dim:
            Av = getattr(self, "ave" + projection_type + "2CCV")

            # if cyl, then only certain components are relevant due to symmetry
            # for faces, x, z matters, for edges, y (which is theta) matters
            if self._meshType == "CYL":
                if projection_type == "E":
                    model = model[:, 1]  # this is the action of a projection mat
                elif projection_type == "F":
                    model = model[:, [0, 2]]

            V = sp.kron(sp.identity(n_elements), sdiag(self.cell_volumes))
            M = sdiag(Av.T * V * mkvc(model))
        else:
            return None

        if invert_matrix:
            return sdinv(M)
        else:
            return M
Example #38
    adj = load_data()
    ind2cid = None
elif FLAGS.data == 'ddi':
    print('Loading ddi dataset')
    adj, ind2cid = load_ddi_data()
else:
    raise ValueError('You did not indicate the dataset.')

num_nodes = adj.shape[0]
num_edges = adj.sum()

import pickle
if FLAGS.feature is None:
    # Original Version:
    # Featureless
    features = sparse_to_tuple(sp.identity(num_nodes))
    num_features = features[2][1]
    features_nonzero = features[1].shape[0]
else:
    # My Own Version:
    # Feature
    if FLAGS.feature == 'mol2vec':
        feature_dict_filepath = os.path.join(DRUG_LIST_PATH, 'smiles2vec.pkl')
    elif FLAGS.feature == 'ssp':
        feature_dict_filepath = os.path.join(DRUG_LIST_PATH, 'smiles2ssp.pkl')
    elif FLAGS.feature == 'molenc':
        feature_dict_filepath = os.path.join(DRUG_LIST_PATH, 'smiles2molenc.pkl')
    elif FLAGS.feature == 'ecfp4':
        feature_dict_filepath = os.path.join(DRUG_LIST_PATH, 'smiles2ecfp4.pkl')
    else:
        raise ValueError('You did not indicate the feature')
Example #39
        return pickle.load(open(file_path, 'rb'))
    else:
        return None


# output_files
x_output_file = 'ind.decagon.allx'
graph_output_file = 'ind.decagon.graph'
# output graph
multi_graph = {}

voc = OrderedDict()
multi_graph_i = pk_load(graph_output_file)
for type, dict_ls in multi_graph_i.items():
    if type not in multi_graph.keys():
        multi_graph[type] = defaultdict(list)
    for drug_k, drug_ls in dict_ls.items():
        if drug_k not in voc.keys():
            voc[drug_k] = len(voc)
        for drug_v in drug_ls:
            if drug_v not in voc.keys():
                voc[drug_v] = len(voc)
            multi_graph[type][voc[drug_k]].append(voc[drug_v])

# save graph and node feature
pk_save(multi_graph, graph_output_file)

n_drugs = len(voc)
drug_feat = sp.identity(n_drugs)
pk_save(drug_feat, x_output_file)
Example #40
def single_dmrg_step(sys, env, m):
    """Performs a single DMRG step using `sys` as the system and `env` as the
    environment, keeping a maximum of `m` states in the new basis.
    """
    assert is_valid_block(sys)
    assert is_valid_block(env)

    # Enlarge each block by a single site.
    sys_enl = enlarge_block(sys)
    if sys is env:  # no need to recalculate a second time
        env_enl = sys_enl
    else:
        env_enl = enlarge_block(env)

    assert is_valid_enlarged_block(sys_enl)
    assert is_valid_enlarged_block(env_enl)

    # Construct the full superblock Hamiltonian.
    m_sys_enl = sys_enl.basis_size
    m_env_enl = env_enl.basis_size
    sys_enl_op = sys_enl.operator_dict
    env_enl_op = env_enl.operator_dict
    superblock_hamiltonian = kron(sys_enl_op["H"], identity(m_env_enl)) + kron(identity(m_sys_enl), env_enl_op["H"]) + \
                             H2(sys_enl_op["conn_Sz"], sys_enl_op["conn_Sp"], env_enl_op["conn_Sz"], env_enl_op["conn_Sp"])

    # Call ARPACK to find the superblock ground state.  ("SA" means find the
    # smallest algebraic eigenvalue, i.e. the ground-state energy.)
    (energy,), psi0 = eigsh(superblock_hamiltonian, k=1, which="SA")

    # Construct the reduced density matrix of the system by tracing out the
    # environment
    #
    # We want to make the (sys, env) indices correspond to (row, column) of a
    # matrix, respectively.  Since the environment (column) index updates most
    # quickly in our Kronecker product structure, psi0 is thus row-major ("C
    # style").
    psi0 = psi0.reshape([sys_enl.basis_size, -1], order="C")
    rho = np.dot(psi0, psi0.conjugate().transpose())

    # Diagonalize the reduced density matrix and sort the eigenvectors by
    # eigenvalue.
    evals, evecs = np.linalg.eigh(rho)
    possible_eigenstates = []
    for eigval, evec in zip(evals, evecs.transpose()):
        possible_eigenstates.append((eigval, evec))
    possible_eigenstates.sort(reverse=True, key=lambda x: x[0])  # largest eigenvalue first

    # Build the transformation matrix from the `m` overall most significant
    # eigenvectors.
    my_m = min(len(possible_eigenstates), m)
    transformation_matrix = np.zeros((sys_enl.basis_size, my_m), dtype='d', order='F')
    for i, (eigval, evec) in enumerate(possible_eigenstates[:my_m]):
        transformation_matrix[:, i] = evec

    truncation_error = 1 - sum([x[0] for x in possible_eigenstates[:my_m]])
    print("truncation error:", truncation_error)

    # Rotate and truncate each operator.
    new_operator_dict = {}
    for name, op in sys_enl.operator_dict.items():
        new_operator_dict[name] = rotate_and_truncate(op, transformation_matrix)

    newblock = Block(length=sys_enl.length,
                     basis_size=my_m,
                     operator_dict=new_operator_dict)

    return newblock, energy
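`rotate_and_truncate` is referenced above but not listed; a minimal sketch of the usual definition (assumed: project each block operator into the new truncated basis) is:

def rotate_and_truncate(operator, transformation_matrix):
    # O' = T^dagger O T, where T holds the kept density-matrix eigenvectors.
    return transformation_matrix.conjugate().transpose().dot(
        operator.dot(transformation_matrix))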
Example #41
0
def speye(n):
    """Sparse identity"""
    return sp.identity(n, format="csr")
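A quick usage check (a sketch; assumes `scipy.sparse` is imported as `sp`):

import scipy.sparse as sp

I = speye(4)
assert I.format == "csr"
assert I.nnz == 4  # only the diagonal is stored
print((I @ sp.random(4, 3)).shape)  # (4, 3): multiplying by I preserves shape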
Example #42
0
    def laplacian(coors, weights):

        n_nod = coors.shape[0]
        displ = (weights - sps.identity(n_nod)) * coors

        return displ
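Here `weights` is a sparse smoothing (averaging) matrix, so `(weights - I) @ coors` gives each node's displacement toward the weighted average of its neighbors. A self-contained illustration with made-up uniform weights (not the caller's actual matrix):

import numpy as np
import scipy.sparse as sps

coors = np.array([[0.0, 0.0], [1.0, 0.0], [0.5, 1.0]])
# Each node averages its two neighbors with weight 1/2.
weights = sps.csr_matrix([[0.0, 0.5, 0.5],
                          [0.5, 0.0, 0.5],
                          [0.5, 0.5, 0.0]])
displ = (weights - sps.identity(3)) @ coors
print(displ)  # row i: vector from node i to the mean of its neighbors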
Example #43
0
def build_coefficient_matrix(df):
    """
    Builds the coefficient matrix B and applies boundary conditions to the
    A matrix. Utilizes the equations file to achieve this.
    """

    # Initialize variables
    eq_n    = EQ_NUM
    m,_     = df.shape
    eq_mtrx = init_matrix(m, eq_n)

    # Create top left matrix (mxm) of B matrix, will be an identity where the
    # row is zero at a boundary
    B_ = identity(m, format='lil', dtype=np.cfloat)

    eq_names  = EQ_NAMES
    var_names = VAR_NAMES

    # Populate all matrices along their diagonal
    for indx, row in df.iterrows():

        if indx % LOAD_INTERVAL == 0:
            print("{0:.1f}% complete..".format(indx * 100 / df.shape[0]))

        # Get indices of neighboring points
        neighbors, bound, b_lst = get_neighbor_ind(df, indx)

        # Bound B if at a boundary
        if bound:
            boundary_condition_B(df, neighbors, B_)

        # At a specific location, calculate the value for each matrix with
        # its respective equation.
        for r_indx in range(eq_n):
            for c_indx in range(eq_n):

                # Get current values (matrix and names of eq and var)
                curr_eq_mtrx  = eq_mtrx[r_indx][c_indx]
                curr_eq_name  = eq_names[r_indx]
                curr_var_name = var_names[c_indx]

                # Evaluate point if not at a wall
                if not bound:
                    evaluate_point(df, neighbors, curr_eq_mtrx, curr_eq_name, curr_var_name)

                # Evaluate point if at a wall
                else:
                    boundary_condition_A(df, neighbors, curr_eq_mtrx, curr_eq_name, curr_var_name, b_lst)

    # Evaluate inner corners (invisible because all their neighbors exist)
    evaluate_inner_corners(df, eq_mtrx)

    # Finalize A matrix
    A_mtrx = stitch_matrix( eq_mtrx, eq_n )

    # Build and finalize B matrix
    B_mtrx = stitch_matrix( build_B(B_, eq_names, var_names, m, eq_n), eq_n )
    return A_mtrx, B_mtrx
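`init_matrix` is defined elsewhere in that project; given how `eq_mtrx[r_indx][c_indx]` is indexed above, a plausible sketch (an assumption about the real helper) is an eq_n-by-eq_n grid of empty m-by-m complex LIL blocks:

import numpy as np
from scipy.sparse import lil_matrix

def init_matrix(m, eq_n):
    # One empty m-by-m complex block per (equation, variable) pair.
    return [[lil_matrix((m, m), dtype=complex) for _ in range(eq_n)]
            for _ in range(eq_n)]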
Example #44
0
def data_process(json_data_path,
                 mat_data_path,
                 train_frac,
                 min_train_edge,
                 min_val_test_edge,
                 verbose=True,
                 fixed_neg_samp=False,
                 neg2pos_ratio=1):
    with open(json_data_path +
              'reverse_relation_dict_unique.json') as data_file:
        reverse_relation_dict_unique = json.load(data_file)

    with open(json_data_path + 'paired_relations_dict.json') as data_file:
        reverse_paired_relations_dict = json.load(data_file)

    rel2end, rel2idx, rel2rel, rel2num = {}, {}, {}, {}
    n_rel_set = 0
    train_edges_dict, val_edges_dict, test_edges_dict = {}, {}, {}
    train_edges_false_dict, val_edges_false_dict, test_edges_false_dict = {}, {}, {}
    cur_rel_idx = 0

    if verbose:
        print('(train, val, test) sizes; (start, end) nodes: relation name')

    for rel in reverse_paired_relations_dict.keys():
        edge_list = reverse_relation_dict_unique[rel]
        all_edges = np.array(edge_list)
        np.random.shuffle(all_edges)
        num_all_edges = all_edges.shape[0]

        num_train = int(np.floor(num_all_edges * train_frac))
        if num_train < min_train_edge: continue
        train_edges = all_edges[:num_train]

        start_nodes, end_nodes = np.unique(train_edges[:, 0]), np.unique(
            train_edges[:, 1])
        val_test_edges = []
        for start, end in all_edges[
                num_train:]:  # node has to be trained to be included in val/test
            if start in start_nodes and end in end_nodes:
                val_test_edges.append([start, end])
        val_test_edges = np.array(val_test_edges)
        if val_test_edges.shape[0] < min_val_test_edge: continue

        num_val = int(np.floor(val_test_edges.shape[0] / 2))
        val_edges = val_test_edges[:num_val]
        test_edges = val_test_edges[num_val:]
        num_test = test_edges.shape[0]

        rel2end[cur_rel_idx] = end_nodes
        train_edges_dict[cur_rel_idx] = train_edges
        val_edges_dict[cur_rel_idx] = val_edges
        test_edges_dict[cur_rel_idx] = test_edges

        if verbose:
            print('(%06d, %06d, %06d); (%06d, %06d) : %s'\
             %(num_train, num_val, num_test, start_nodes.shape[0], end_nodes.shape[0], rel))

        if fixed_neg_samp:
            train_edges_false = []
            while len(train_edges_false) < num_train * neg2pos_ratio:
                ss = np.random.choice(start_nodes)
                ee = np.random.choice(end_nodes)
                if _ismember([ss, ee], train_edges): continue
                if train_edges_false:
                    if _ismember([ss, ee], train_edges_false): continue
                train_edges_false.append([ss, ee])

        val_edges_false = []
        while len(val_edges_false) < num_val:
            ss = np.random.choice(start_nodes)
            ee = np.random.choice(end_nodes)
            if _ismember([ss, ee], all_edges): continue
            if val_edges_false:
                if _ismember([ss, ee], val_edges_false): continue
            val_edges_false.append([ss, ee])

        test_edges_false = []
        while len(test_edges_false) < num_test:
            ss = np.random.choice(start_nodes)
            ee = np.random.choice(end_nodes)
            if _ismember([ss, ee], all_edges): continue
            if test_edges_false:
                if _ismember([ss, ee], test_edges_false): continue
            test_edges_false.append([ss, ee])

        if fixed_neg_samp:
            train_edges_false_dict[cur_rel_idx] = train_edges_false
        val_edges_false_dict[cur_rel_idx] = val_edges_false
        test_edges_false_dict[cur_rel_idx] = test_edges_false

        rel2idx[rel] = cur_rel_idx
        rel2rel[cur_rel_idx] = cur_rel_idx
        rel2num[cur_rel_idx] = num_train
        cur_rel_idx += 1
        n_rel_set += 1

        if len(reverse_paired_relations_dict[rel]) == 1:
            twin_rel = reverse_paired_relations_dict[rel][0]

            twin_start_nodes = end_nodes
            twin_end_nodes = start_nodes
            twin_val_test_edges = []
            for twin_end, twin_start in all_edges[num_train:]:
                if twin_start in twin_start_nodes and twin_end in twin_end_nodes:
                    twin_val_test_edges.append([twin_start, twin_end])
            twin_val_test_edges = np.array(twin_val_test_edges)

            twin_num_val = int(np.floor(twin_val_test_edges.shape[0] / 2))
            twin_val_edges = twin_val_test_edges[:twin_num_val]
            twin_test_edges = twin_val_test_edges[twin_num_val:]
            twin_num_test = twin_test_edges.shape[0]

            rel2end[cur_rel_idx] = twin_end_nodes
            train_edges_dict[cur_rel_idx] = np.hstack([
                train_edges[:, 1].reshape(num_train, 1),
                train_edges[:, 0].reshape(num_train, 1)
            ])
            val_edges_dict[cur_rel_idx] = twin_val_edges
            test_edges_dict[cur_rel_idx] = twin_test_edges

            if verbose:
                print('(%06d, %06d, %06d); (%06d, %06d) : %s'\
                 %(num_train, twin_num_val, twin_num_test, twin_start_nodes.shape[0], twin_end_nodes.shape[0], twin_rel))

            if fixed_neg_samp:
                twin_train_edges_false = []
                while len(twin_train_edges_false) < len(train_edges_false):
                    ss = np.random.choice(twin_start_nodes)
                    ee = np.random.choice(twin_end_nodes)
                    if _ismember([ee, ss], train_edges): continue
                    if twin_train_edges_false:
                        if _ismember([ss, ee], twin_train_edges_false):
                            continue
                    twin_train_edges_false.append([ss, ee])

            twin_val_edges_false = []
            while len(twin_val_edges_false) < twin_num_val:
                ss = np.random.choice(twin_start_nodes)
                ee = np.random.choice(twin_end_nodes)
                if _ismember([ee, ss], all_edges): continue
                if twin_val_edges_false:
                    if _ismember([ss, ee], twin_val_edges_false): continue
                twin_val_edges_false.append([ss, ee])

            twin_test_edges_false = []
            while len(twin_test_edges_false) < twin_num_test:
                ss = np.random.choice(twin_start_nodes)
                ee = np.random.choice(twin_end_nodes)
                if _ismember([ee, ss], all_edges): continue
                if twin_test_edges_false:
                    if _ismember([ss, ee], twin_test_edges_false): continue
                twin_test_edges_false.append([ss, ee])

            if fixed_neg_samp:
                train_edges_false_dict[cur_rel_idx] = twin_train_edges_false
            val_edges_false_dict[cur_rel_idx] = twin_val_edges_false
            test_edges_false_dict[cur_rel_idx] = twin_test_edges_false

            rel2idx[twin_rel] = cur_rel_idx
            cur_rel_idx += 1
            rel2rel[rel2idx[twin_rel]] = rel2rel[rel2idx[rel]]
            rel2num[rel2idx[twin_rel]] = num_train

    np.save(mat_data_path + 'train_edges_dict', train_edges_dict)
    np.save(mat_data_path + 'val_edges_dict', val_edges_dict)
    np.save(mat_data_path + 'test_edges_dict', test_edges_dict)
    if fixed_neg_samp:
        np.save(mat_data_path + 'train_edges_false_dict',
                train_edges_false_dict)
    np.save(mat_data_path + 'val_edges_false_dict', val_edges_false_dict)
    np.save(mat_data_path + 'test_edges_false_dict', test_edges_false_dict)

    idx2rel = {i: r for r, i in rel2idx.items()}
    n_rel = len(idx2rel)

    with open(json_data_path + 'entity2id.json') as data_file:
        entity2id = json.load(data_file)
    n_entity = len(entity2id)

    feat = sp.identity(n_entity)
    feat = sparse_to_tuple(feat.tocoo())

    # adj_list = [None]*n_rel
    # for r,train_edges in train_edges_dict.iteritems():
    # 	g = nx.DiGraph()
    # 	g.add_nodes_from(range(n_entity))
    # 	g.add_edges_from(train_edges)
    # 	adj_list[r] = nx.adjacency_matrix(g)

    # deg_list = [np.array(en_adj.sum(axis=0)).squeeze() for en_adj in adj_list]

    return idx2rel, rel2rel, rel2num, n_entity, n_rel, n_rel_set, feat, rel2end  #, adj_list, deg_list
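`_ismember` is not shown; a minimal sketch matching its use above (assumed: test whether a candidate edge appears as a row of an edge array) is:

import numpy as np

def _ismember(edge, edge_array):
    # True if `edge` ([start, end]) occurs as a row of `edge_array`.
    edge_array = np.asarray(edge_array)
    return bool(np.any(np.all(edge_array == np.asarray(edge), axis=1)))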
Example #45
0
    def make_source_function(self):
        """
        Calculates the source function using the line absorption rate estimator `Edotlu_estimator`

        Formally it calculates the expression ( 1 - exp(-tau_ul) ) S_ul but this product is what we need later,
        so there is no need to factor out the source function explicitly.

        Parameters
        ----------
        model : tardis.model.Radial1DModel

        Returns
        -------
        Numpy array containing ( 1 - exp(-tau_ul) ) S_ul ordered by wavelength of the transition u -> l
        """

        model = self.model
        runner = self.runner

        macro_ref = self.atomic_data.macro_atom_references
        macro_data = self.atomic_data.macro_atom_data

        no_lvls = len(self.atomic_data.levels)
        no_shells = len(model.w)

        if runner.line_interaction_type == "macroatom":
            internal_jump_mask = (macro_data.transition_type >= 0).values
            ma_int_data = macro_data[internal_jump_mask]
            internal = self.original_plasma.transition_probabilities[
                internal_jump_mask]

            source_level_idx = ma_int_data.source_level_idx.values
            destination_level_idx = ma_int_data.destination_level_idx.values

        Edotlu_norm_factor = 1 / (runner.time_of_simulation * model.volume)
        exptau = 1 - np.exp(-self.original_plasma.tau_sobolevs)
        Edotlu = Edotlu_norm_factor * exptau * runner.Edotlu_estimator

        # The following may be achieved by calling the appropriate plasma
        # functions
        Jbluelu_norm_factor = ((const.c.cgs * model.time_explosion /
                                (4 * np.pi * runner.time_of_simulation *
                                 model.volume)).to("1/(cm^2 s)").value)
        # Jbluelu should already be in the correct order, i.e. by wavelength of
        # the transition l->u
        Jbluelu = runner.j_blue_estimator * Jbluelu_norm_factor

        upper_level_index = self.atomic_data.lines.index.droplevel(
            "level_number_lower")
        e_dot_lu = pd.DataFrame(Edotlu, index=upper_level_index)
        e_dot_u = e_dot_lu.groupby(level=[0, 1, 2]).sum()
        e_dot_u_src_idx = macro_ref.loc[e_dot_u.index].references_idx.values

        if runner.line_interaction_type == "macroatom":
            C_frame = pd.DataFrame(columns=np.arange(no_shells),
                                   index=macro_ref.index)
            q_indices = (source_level_idx, destination_level_idx)
            for shell in range(no_shells):
                Q = sp.coo_matrix((internal[shell], q_indices),
                                  shape=(no_lvls, no_lvls))
                inv_N = sp.identity(no_lvls) - Q
                e_dot_u_vec = np.zeros(no_lvls)
                e_dot_u_vec[e_dot_u_src_idx] = e_dot_u[shell].values
                C_frame[shell] = sp.linalg.spsolve(inv_N.T, e_dot_u_vec)

        e_dot_u.index.names = [
            "atomic_number",
            "ion_number",
            "source_level_number",
        ]  # To make the q_ul e_dot_u product work, could be cleaner
        transitions = self.original_plasma.atomic_data.macro_atom_data[
            self.original_plasma.atomic_data.macro_atom_data.transition_type ==
            -1].copy()
        transitions_index = transitions.set_index(
            ["atomic_number", "ion_number",
             "source_level_number"]).index.copy()
        tmp = self.original_plasma.transition_probabilities[(
            self.atomic_data.macro_atom_data.transition_type == -1).values]
        q_ul = tmp.set_index(transitions_index)
        t = model.time_explosion.value
        lines = self.atomic_data.lines.set_index("line_id")
        wave = lines.wavelength_cm.loc[
            transitions.transition_line_id].values.reshape(-1, 1)
        if runner.line_interaction_type == "macroatom":
            e_dot_u = C_frame.loc[e_dot_u.index]
        att_S_ul = wave * (q_ul * e_dot_u) * t / (4 * np.pi)

        result = pd.DataFrame(att_S_ul.values,
                              index=transitions.transition_line_id.values)
        att_S_ul = result.loc[lines.index.values].values

        # Jredlu should already be in the correct order, i.e. by wavelength of
        # the transition l->u (similar to Jbluelu)
        Jredlu = Jbluelu * np.exp(
            -self.original_plasma.tau_sobolevs) + att_S_ul
        if self.interpolate_shells > 0:
            (
                att_S_ul,
                Jredlu,
                Jbluelu,
                e_dot_u,
            ) = self.interpolate_integrator_quantities(att_S_ul, Jredlu,
                                                       Jbluelu, e_dot_u)
        else:
            runner.r_inner_i = runner.r_inner_cgs
            runner.r_outer_i = runner.r_outer_cgs
            runner.tau_sobolevs_integ = self.original_plasma.tau_sobolevs.values
            runner.electron_densities_integ = self.original_plasma.electron_densities.values

        return att_S_ul, Jredlu, Jbluelu, e_dot_u
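The per-shell loop above solves the macro-atom system (I - Q)^T C = e_dot_u; a toy standalone version of that single step (illustrative values only):

import numpy as np
import scipy.sparse as sp
from scipy.sparse.linalg import spsolve

no_lvls = 3
# Internal-jump probabilities (row: source level, column: destination level).
Q = sp.coo_matrix(([0.2, 0.1], ([0, 1], [1, 2])), shape=(no_lvls, no_lvls))
e_dot_u_vec = np.array([1.0, 0.5, 0.0])
C = spsolve((sp.identity(no_lvls) - Q).T.tocsc(), e_dot_u_vec)
print(C)  # energy deposited per level after accounting for internal jumps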
Example #46
0
def solve_compressed_osher(L,
                           K,
                           mu1=10.,
                           Phi_init=None,
                           maxiter=None,
                           callback=None,
                           D=None,
                           Dinv=None,
                           r=1.,
                           lambda_=1.,
                           tol_abs=1.e-8,
                           tol_rel=1.e-6,
                           verbose=100):
    N = L.shape[0]  # alias
    mu = mu1 / float(N)

    # initial variables
    if Phi_init is None:
        Phi = np.linalg.qr(np.random.uniform(-1, 1, (L.shape[0], K)))[0]
    else:
        Phi = Phi_init
    P = Q = Phi
    b = np.zeros_like(Phi)
    B = np.zeros_like(Phi)

    # status variables for Phi-solve
    Hsolve = None
    refactorize = False

    # iteration state
    iters = count() if maxiter is None else range(maxiter)
    converged = False

    info = {}

    if D is None:
        D = sparse.identity(N)
        Dinv = sparse.identity(N)

    for i in iters:
        # update Phi
        if Hsolve is None or refactorize:
            A = (-L - L.T + sparse.eye(L.shape[0], L.shape[0]) *
                 (lambda_ + r)).tocsc()
            Hsolve = factorized(A)
        rhs = np.asfortranarray(r * (P - B) + lambda_ * (Q - b))
        Phi = Hsolve(rhs)

        # update Q
        Q_old = Q
        Q = shrink(Phi + b, mu / lambda_)

        # update P
        _PA = D * (Phi + B)
        _PP = np.dot(_PA.T, _PA)
        U, sigma, St = np.linalg.svd(_PP)
        Sigma_inv = np.diag(np.sqrt(1.0 / sigma))
        P_old = P
        P = (Dinv * (_PA.dot(U.dot(Sigma_inv.dot(St))))) / r

        # update residuals
        b += Phi - Q
        B += Phi - P

        # compute primal and dual residual
        snorm1 = np.linalg.norm(lambda_ * (Q - Q_old))
        rnorm1 = np.linalg.norm(Phi - Q)
        snorm2 = np.linalg.norm(r * (P - P_old))
        rnorm2 = np.linalg.norm(Phi - P)
        snorm = np.sqrt(snorm1**2 + snorm2**2)
        rnorm = np.sqrt(rnorm1**2 + rnorm2**2)

        if callback is not None:
            try:
                callback(L, mu, Phi, P, Q, r_primal=rnorm, r_dual=snorm)
            except StopIteration:
                converged = True

        # convergence checks
        eps_pri = np.sqrt(Phi.shape[1]) * tol_abs + tol_rel * max(
            map(np.linalg.norm, [Phi, Q, P]))
        eps_dual = np.sqrt(Phi.shape[1]) * tol_abs + tol_rel * max(
            np.linalg.norm(r * B), np.linalg.norm(lambda_ * b))
        if rnorm < eps_pri and snorm < eps_dual or converged:
            if verbose:
                print "converged!"
            converged = True
        if verbose and (i % verbose == 0 or converged or
                        (maxiter is not None and i == maxiter - 1)):
            sparsity = np.sum(mu * np.abs(Phi))
            eig = -(Phi * (L * Phi)).sum()
            gap1 = np.linalg.norm(Q - Phi)
            gap2 = np.linalg.norm(P - Phi)
            #ortho = np.linalg.norm(Phi.T.dot((D.T * D) * Phi) - np.eye(Phi.shape[1]))
            print("%d o %0.8f | r [%.8f %.8f %.8f] s [%.8f]"
                  % (i, sparsity + eig, gap1, gap2, rnorm, snorm))
        if converged:
            break
    info['num_iters'] = i
    info['r_primal'] = np.linalg.norm(Phi - Q) + np.linalg.norm(Phi - P)
    info['Q'] = Q
    info['P'] = P
    info['Phi'] = Phi
    return P, info
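`shrink` (used here and again in Example #51) is the elementwise soft-thresholding operator, i.e. the proximal map of the l1 term; a standard sketch (assumed to match the missing helper):

import numpy as np

def shrink(X, t):
    # prox of t*||X||_1: shrink each entry toward zero by t.
    return np.sign(X) * np.maximum(np.abs(X) - t, 0.0)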
Example #47
0
side = int(1e1)
dim = side * side

#rank of present proc and number of cores
comm = _PETSc.COMM_WORLD
rank = comm.Get_rank()
cpu = comm.Get_size()

# matrix definition
A = _PETSc.Mat()
A.create(comm)
A.setSizes([dim, dim])
A.setType('aij')

#dummy matrix
scipyMat = identity(dim, format='csr')  # CSR so .indptr/.indices/.data exist below
csr = (scipyMat.indptr, scipyMat.indices, scipyMat.data)
A.setPreallocationCSR(csr)

x, b = A.getVecs()
#define dummy right hand side b
numpRHS = _np.linspace(0., 9.,
                       dim)[b.getOwnershipRange()[0]:b.getOwnershipRange()[1]]
b = _PETSc.Vec().createWithArray(numpRHS, comm=comm)
# set zeros to the solution vector
x = _PETSc.Vec().createWithArray(
    _np.zeros(dim)[b.getOwnershipRange()[0]:b.getOwnershipRange()[1]],
    comm=comm)

SOLVE = True  #(or False)
if SOLVE:
Example #48
0
    def permute_cells(self):
        """Permutation matrix re-ordering of cells sorted by x, then y, then z"""
        # TODO: cache these?
        P = np.lexsort(self.gridCC.T)  # sort by x, then y, then z
        return sp.identity(self.nC).tocsr()[P]
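Row-slicing a sparse identity is a compact way to materialize a permutation matrix; a standalone sketch of the trick used above:

import numpy as np
import scipy.sparse as sp

order = np.array([2, 0, 1])            # desired row order, e.g. from np.lexsort
P = sp.identity(3).tocsr()[order]      # permutation matrix
x = np.array([10.0, 20.0, 30.0])
print(P @ x)                           # [30. 10. 20.]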
Example #49
0
- adj : adjacency matrix for all nodes
- features : vector embedding (it will be set to identity matrix)'''
for i in range(num_graph):

    globals()['adj{}'.format(i)], \
    globals()['features{}'.format(i)], \
    globals()['y_train{}'.format(i)], \
    globals()['y_val{}'.format(i)], \
    globals()['y_test{}'.format(i)], \
    globals()['train_mask{}'.format(i)], \
    globals()['val_mask{}'.format(i)], \
    globals()['test_mask{}'.format(i)], \
    globals()['train_size{}'.format(i)], \
    globals()['test_size{}'.format(i)] = load_corpus(cfg.dataset, i)
    
    globals()['features{}'.format(i)] = sp.identity(globals()['features{}'.format(i)].shape[0])
    
models = []
for i in range(num_graph): 
    # Some preprocessing
    # Here preprocess adj(adjacent matrix) to adj_hat(called support)
    adj = globals()['adj{}'.format(i)]
    features = globals()['features{}'.format(i)]
    y_train = globals()['y_train{}'.format(i)]
    y_val = globals()['y_val{}'.format(i)]
    y_test = globals()['y_test{}'.format(i)]
    train_mask = globals()['train_mask{}'.format(i)]
    val_mask = globals()['val_mask{}'.format(i)]
    test_mask = globals()['test_mask{}'.format(i)]
    train_size = globals()['train_size{}'.format(i)]
    test_size = globals()['test_size{}'.format(i)]
Example #50
0
def liouvillian_fast(H, c_op_list, data_only=False):
    """Assembles the Liouvillian superoperator from a Hamiltonian
    and a ``list`` of collapse operators. Like liouvillian, but with an
    experimental implementation which avoids creating extra Qobj instances,
    which can be advantageous for large systems.

    Parameters
    ----------
    H : qobj
        System Hamiltonian.

    c_op_list : array_like
        A ``list`` or ``array`` of collapse operators.

    Returns
    -------
    L : qobj
        Liouvillian superoperator.

    """

    if H is not None:
        if H.isoper:
            op_dims = H.dims
            op_shape = H.shape
        elif H.issuper:
            op_dims = H.dims[0]
            op_shape = [prod(op_dims[0]), prod(op_dims[0])]
        else:
            raise TypeError("Invalid type for Hamiltonian.")
    else:
        # no hamiltonian given, pick system size from a collapse operator
        if isinstance(c_op_list, list) and len(c_op_list) > 0:
            c = c_op_list[0]
            if c.isoper:
                op_dims = c.dims
                op_shape = c.shape
            elif c.issuper:
                op_dims = c.dims[0]
                op_shape = [prod(op_dims[0]), prod(op_dims[0])]
            else:
                raise TypeError("Invalid type for collapse operator.")
        else:
            raise TypeError("Either H or c_op_list must be given.")

    sop_dims = [[op_dims[0], op_dims[0]], [op_dims[1], op_dims[1]]]
    sop_shape = [prod(op_dims), prod(op_dims)]

    spI = sp.identity(op_shape[0])

    if H:
        if H.isoper:
            data = -1j * (sp.kron(spI, H.data, format='csr') -
                          sp.kron(H.data.T, spI, format='csr'))
        else:
            data = H.data
    else:
        data = sp.csr_matrix((sop_shape[0], sop_shape[1]), dtype=complex)

    for c_op in c_op_list:
        if c_op.issuper:
            data = data + c_op.data
        else:
            cd = c_op.data.T.conj()
            c = c_op.data
            data = data + sp.kron(cd.T, c, format='csr')
            cdc = cd * c
            data = data - 0.5 * sp.kron(spI, cdc, format='csr')
            data = data - 0.5 * sp.kron(cdc.T, spI, format='csr')

    if data_only:
        return data
    else:
        L = Qobj()
        L.dims = sop_dims
        L.data = data
        L.isherm = False
        L.superrep = 'super'
        return L
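For the Hamiltonian part alone, the kron construction above is the vectorized commutator: with column-stacked vec(rho), -1j*(kron(I, H) - kron(H.T, I)) @ vec(rho) equals vec(-1j*(H@rho - rho@H)). A small numeric check (toy dense H for clarity):

import numpy as np
import scipy.sparse as sp

H = np.array([[1.0, 0.5], [0.5, -1.0]])
rho = np.array([[0.7, 0.2], [0.2, 0.3]])
I2 = sp.identity(2)

L = -1j * (sp.kron(I2, H, format='csr') - sp.kron(H.T, I2, format='csr'))
lhs = L @ rho.flatten(order='F')                    # column-stacked vec(rho)
rhs = (-1j * (H @ rho - rho @ H)).flatten(order='F')
assert np.allclose(lhs, rhs)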
Example #51
0
def solve_compressed_splitorth(L,
                               K,
                               mu1=10.,
                               Phi_init=None,
                               maxiter=None,
                               callback=None,
                               D=None,
                               Dinv=None,
                               rho=1.0,
                               auto_adjust_penalty=True,
                               rho_adjust=2.0,
                               rho_adjust_sensitivity=5.,
                               tol_abs=1.e-8,
                               tol_rel=1.e-6,
                               verbose=100,
                               check_interval=10):
    N = L.shape[0]  # alias
    mu = (mu1 / float(N))

    # initial variables
    if Phi_init is None:
        Phi = np.linalg.qr(np.random.uniform(-1, 1, (L.shape[0], K)))[0]
    else:
        Phi = Phi_init
    P = Q = Phi
    U = np.zeros((2, Phi.shape[0], Phi.shape[1]))

    # status variables for Phi-solve
    Hsolve = None
    refactorize = False

    # iteration state
    iters = count() if maxiter is None else range(maxiter)
    converged = False

    info = {}

    if D is None:
        D = sparse.identity(N)
        Dinv = sparse.identity(N)

    for i in iters:
        # update Phi
        _PA = D * (P - U[1] + Q - U[0])
        _PP = np.dot(_PA.T, _PA)
        S, sigma, St = np.linalg.svd(_PP)
        Sigma_inv = np.diag(np.sqrt(1.0 / sigma))
        Phi = Dinv * (_PA.dot(S.dot(Sigma_inv.dot(St))))

        # update Q
        Q_old = Q
        Q = shrink(Phi + U[0], mu / rho)

        # update P
        if Hsolve is None or refactorize:
            A = (-L - L.T + sparse.eye(L.shape[0], L.shape[0]) * rho).tocsc()
            if Hsolve is None or (refactorize
                                  and not hasattr(Hsolve, 'cholesky_inplace')):
                Hsolve = factorized(A)
            elif refactorize:
                # when cholmod is available, use faster in-place refactorization
                Hsolve.cholesky_inplace(A)
                refactorize = False
        rhs = np.asfortranarray(rho * (Phi + U[1]))
        P_old = P
        P = Hsolve(rhs)

        # update residuals
        U[0] += Phi - Q
        U[1] += Phi - P

        if i % check_interval == 0:
            # compute primal and dual residual
            snorm = np.sqrt((rho * ((Q - Q_old)**2 + (P - P_old)**2)).sum())
            rnorm = np.sqrt(((Phi - Q)**2 + (Phi - P)**2).sum())
            if auto_adjust_penalty:
                if rnorm > rho_adjust_sensitivity * snorm:
                    rho *= rho_adjust
                    U /= rho_adjust
                    refactorize = True
                elif snorm > rho_adjust_sensitivity * rnorm:
                    rho /= rho_adjust
                    U *= rho_adjust
                    refactorize = True

            if callback is not None:
                try:
                    callback(L, mu, Phi, P, Q, r_primal=rnorm, r_dual=snorm)
                except StopIteration:
                    converged = True

            # convergence checks
            eps_pri = np.sqrt(Phi.shape[1] * 2) * tol_abs + tol_rel * max(
                map(np.linalg.norm, [Phi, np.vstack((Q, P))]))
            eps_dual = np.sqrt(
                Phi.shape[1]) * tol_abs + tol_rel * np.linalg.norm(rho * U)
            # TODO check if convergence check is correct for 3-function ADMM
            if rnorm < eps_pri and snorm < eps_dual or converged:
                if verbose:
                    print "converged!"
                converged = True
            if verbose and (i % verbose == 0 or converged or
                            (maxiter is not None and i == maxiter - 1)):
                sparsity = np.sum(mu * np.abs(Phi))
                eig = -(Phi * (L * Phi)).sum()
                gap1 = np.linalg.norm(Q - Phi)
                gap2 = np.linalg.norm(P - Phi)
                #ortho = np.linalg.norm(Phi.T.dot((D.T * D) * Phi) - np.eye(Phi.shape[1]))
                print("%d o %0.8f | r [%.8f %.8f %.8f] s [%.8f]"
                      % (i, sparsity + eig, gap1, gap2, rnorm, snorm))
            if converged:
                break

    info['num_iters'] = i
    info['r_primal'] = rnorm
    info['r_dual'] = snorm
    info['Q'] = Q
    info['P'] = P
    info['Phi'] = Phi
    return Phi, info
Example #52
0
def getRegularizationMatrix(srcxaxis, srcyaxis, mode="new", kernel="c"):
    from scipy.sparse import diags, csc_matrix, lil_matrix
    import numpy
    #there are many plausible forms of regularization, but curvature
    #seems to be the default option in the literature. Use mode="new",
    # and kernel="c" to get this option.
    srcshape = (srcxaxis.size * srcyaxis.size, srcxaxis.size * srcyaxis.size)
    if mode[0:3] == "new":
        if kernel is None:
            kernel = "c"
        if kernel == "z":
            kernel = numpy.array([[1]])
        elif kernel == "g":
            kernel = numpy.array([[0, -1, 0], [-1, 4, -1], [0, -1, 0]])
        elif kernel == "gc":
            kernel = numpy.array([[-1, -2, -1], [-2, 12, -2], [-1, -2, -1]])
        elif kernel == "c":
            kernel = numpy.array([[0, 0, 1, 0, 0], [0, 0, -4, 0, 0],
                                  [1, -4, 12, -4, 1], [0, 0, -4, 0, 0],
                                  [0, 0, 1, 0, 0]])
        elif kernel == "cc":
            kernel = 2 * numpy.array([[0, 0, 1, 0, 0], [0, 0, -4, 0, 0],
                                      [1, -4, 12, -4, 1], [0, 0, -4, 0, 0],
                                      [0, 0, 1, 0, 0]])
            kernel += numpy.array([[1, 0, 0, 0, 1], [0, -4, 0, -4, 0],
                                   [0, 0, 12, 0, 0], [0, -4, 0, -4, 0],
                                   [1, 0, 0, 0, 1]])

        stringerr = "regularization kernel not square symmetric"
        assert kernel.shape[0] == kernel.shape[1], stringerr
        assert numpy.sum(numpy.flipud(kernel) - kernel) == 0, stringerr
        assert numpy.sum(numpy.fliplr(kernel) - kernel) == 0, stringerr
        kc = (kernel.shape[0] - 1) // 2
        if kernel[kc, kc] < 0: kernel = -kernel

        vals = []
        poss = []
        for i in range(kernel.shape[0]):
            for j in range(kernel.shape[1]):
                vals.append(kernel[i, j])
                poss.append((j - kc) + (i - kc) * srcyaxis.size)

        mat = diags(vals,
                    poss,
                    shape=(srcxaxis.size * srcyaxis.size,
                           srcxaxis.size * srcyaxis.size))
        mat = lil_matrix(mat)

        #now hammer out the glitches
        #(points where the regularization kernel overlaps the edges of the grid)
        allcols = numpy.arange(srcxaxis.size * srcyaxis.size)
        for i in range(kc):
            le = allcols[allcols % srcxaxis.size == i]
            re = allcols[allcols % srcxaxis.size == srcxaxis.size - 1 - i]
            for el in le:
                mat[el, el - 1] = 0
            for el in re:
                if el != allcols.max() - i:
                    mat[el, el + 1] = 0

    if mode == "zeroth":
        from scipy.sparse import identity
        return identity(srcxaxis.size * srcyaxis.size).tocsc()

    if mode == "gradient":
        mat = diags([-2, -2, 8, -2, -2],
                    [-srcxaxis.size, -1, 0, 1, srcxaxis.size],
                    shape=(srcxaxis.size * srcyaxis.size,
                           srcxaxis.size * srcyaxis.size))
        mat = lil_matrix(mat)

        #glitches are at left and right edges
        allcols = numpy.arange(srcxaxis.size * srcyaxis.size)
        leftedges = allcols[allcols % srcxaxis.size == 0]
        rightedges = allcols[allcols % srcxaxis.size == srcxaxis.size - 1]
        for el in leftedges:
            mat[el, el - 1] = 0
        for el in rightedges:
            if el != allcols.max():
                mat[el, el + 1] = 0

    elif mode == "curvatureOLD":
        mat = diags([2, 2, -8, -8, 24, -8, -8, 2, 2], [
            -2 * srcxaxis.size, -2, -srcxaxis.size, -1, 0, 1, srcxaxis.size,
            2 * srcxaxis.size, 2
        ],
                    shape=(srcxaxis.size * srcyaxis.size,
                           srcxaxis.size * srcyaxis.size))
        mat = lil_matrix(mat)

        #glitches are at left and right edges
        allcols = numpy.arange(srcxaxis.size * srcyaxis.size)
        leftedges = allcols[allcols % srcxaxis.size == 0]
        rightedges = allcols[allcols % srcxaxis.size == srcxaxis.size - 1]
        leftedgesinone = allcols[allcols % srcxaxis.size == 1]
        rightedgesinone = allcols[allcols % srcxaxis.size == srcxaxis.size - 2]

        for el in leftedges:
            mat[el, el - 1] = 0
            mat[el, el - 2] = 0
        for el in rightedges:
            if el != allcols.max():
                mat[el, el + 1] = 0
                mat[el, el + 2] = 0
        for el in leftedgesinone:
            mat[el, el - 2] = 0
        for el in rightedgesinone:
            if el != allcols.max() - 1:
                mat[el, el + 2] = 0

    elif mode == "curvature":
        I, J = srcxaxis.size, srcyaxis.size
        matrix = lil_matrix((I * J, I * J))
        for i in range(I - 2):
            for j in range(J):
                ij = i + j * J
                i1j = ij + 1
                i2j = ij + 2
                matrix[ij, ij] += 1.
                matrix[i1j, i1j] += 4
                matrix[i2j, i2j] += 1
                matrix[ij, i2j] += 1
                matrix[i2j, ij] += 1
                matrix[ij, i1j] -= 2
                matrix[i1j, ij] -= 2
                matrix[i1j, i2j] -= 2
                matrix[i2j, i1j] -= 2
        for i in range(I):
            for j in range(J - 2):
                ij = i + j * J
                ij1 = ij + J
                ij2 = ij + 2 * J
                matrix[ij, ij] += 1
                matrix[ij1, ij1] += 4
                matrix[ij2, ij2] += 1
                matrix[ij, ij2] += 1
                matrix[ij2, ij] += 1
                matrix[ij, ij1] -= 2
                matrix[ij1, ij] -= 2
                matrix[ij1, ij2] -= 2
                matrix[ij2, ij1] -= 2
        for i in range(I):
            iJ_1 = i + (J - 2) * J
            iJ = i + (J - 1) * J
            matrix[iJ_1, iJ_1] += 1
            matrix[iJ, iJ] += 1
            matrix[iJ, iJ_1] -= 1
            matrix[iJ_1, iJ] -= 1
        for j in range(J):
            I_1j = (I - 2) + j * J
            Ij = (I - 1) + j * J
            matrix[I_1j, I_1j] += 1
            matrix[Ij, Ij] += 1
            matrix[Ij, I_1j] -= 1
            matrix[I_1j, Ij] -= 1
        for i in range(I):
            iJ = i + (J - 1) * J
            matrix[iJ, iJ] += 1
        for j in range(J):
            Ij = (I - 1) + j * J
            matrix[Ij, Ij] += 1
        mat = matrix

    return mat.tocsc()
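A usage sketch (illustrative axes; the regularization term added to chi^2 is the quadratic form s^T R s on the source vector s):

import numpy

srcxaxis = numpy.linspace(-1.0, 1.0, 10)
srcyaxis = numpy.linspace(-1.0, 1.0, 10)
R = getRegularizationMatrix(srcxaxis, srcyaxis, mode="new", kernel="c")
s = numpy.random.rand(srcxaxis.size * srcyaxis.size)
penalty = s @ (R @ s)  # curvature penalty for this source configuration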
Example #53
0
    def matrix_rhs(self, g, data):
        """
        Return the matrix and right-hand side for a discretization of a second
        order elliptic equation using the hybrid dual virtual element method.
        The names of data in the input dictionary (data) are:
        perm : tensor.SecondOrderTensor
            Permeability defined cell-wise. If not given, an identity
            permeability is assumed and a warning is raised.
        source : array (self.g.num_cells)
            Scalar source term defined cell-wise. If not given, a zero
            source term is assumed and a warning is raised.
        bc : boundary conditions (optional)
        bc_val : dictionary (optional)
            Values of the boundary conditions. The dictionary has at most the
            following keys: 'dir' and 'neu', for Dirichlet and Neumann boundary
            conditions, respectively.

        Parameters
        ----------
        g : grid, or a subclass, with geometry fields computed.
        data: dictionary to store the data.

        Return
        ------
        matrix: sparse csr (g.num_faces+g.num_cells, g.num_faces+g.num_cells)
            Saddle point matrix obtained from the discretization.
        rhs: array (g.num_faces+g_num_cells)
            Right-hand side which contains the boundary conditions and the scalar
            source term.

        Examples
        --------
        b_faces_neu = ... # id of the Neumann faces
        b_faces_dir = ... # id of the Dirichlet faces
        bnd = bc.BoundaryCondition(g, np.hstack((b_faces_dir, b_faces_neu)),
                                ['dir']*b_faces_dir.size + ['neu']*b_faces_neu.size)
        bnd_val = {'dir': fun_dir(g.face_centers[:, b_faces_dir]),
                   'neu': fun_neu(g.face_centers[:, b_faces_neu])}

        data = {'perm': perm, 'source': f, 'bc': bnd, 'bc_val': bnd_val}

        H, rhs = hybrid.matrix_rhs(g, data)
        l = sps.linalg.spsolve(H, rhs)
        u, p = hybrid.compute_up(g, l, data)
        P0u = dual.project_u(g, perm, u)

        """
        # pylint: disable=invalid-name

        # If a 0-d grid is given then we return an identity matrix
        if g.dim == 0:
            return sps.identity(self.ndof(g), format="csr"), np.zeros(1)

        parameter_dictionary = data[pp.PARAMETERS][self.keyword]
        k = parameter_dictionary["second_order_tensor"]
        f = parameter_dictionary["source"]
        bc = parameter_dictionary["bc"]
        bc_val = parameter_dictionary["bc_values"]
        a = parameter_dictionary["aperture"]

        faces, _, sgn = sps.find(g.cell_faces)

        # Map the domain to a reference geometry (i.e. equivalent to compute
        # surface coordinates in 1d and 2d)
        c_centers, f_normals, f_centers, _, _, _ = pp.map_geometry.map_grid(g)

        # Weight for the stabilization term
        diams = g.cell_diameters()
        weight = np.power(diams, 2 - g.dim)

        # Allocate the data to store matrix entries, that's the most efficient
        # way to create a sparse matrix.
        size = np.sum(
            np.square(g.cell_faces.indptr[1:] - g.cell_faces.indptr[:-1]))
        I = np.empty(size, dtype=int)
        J = np.empty(size, dtype=int)
        data = np.empty(size)
        rhs = np.zeros(g.num_faces)

        idx = 0
        # Use a dummy keyword to trick the constructor of dualVEM.
        massHdiv = pp.MVEM("dummy").massHdiv

        for c in np.arange(g.num_cells):
            # For the current cell retrieve its faces
            loc = slice(g.cell_faces.indptr[c], g.cell_faces.indptr[c + 1])
            faces_loc = faces[loc]
            ndof = faces_loc.size

            # Retrieve permeability and normals assumed outward to the cell.
            sgn_loc = sgn[loc].reshape((-1, 1))
            normals = np.multiply(np.tile(sgn_loc.T, (g.dim, 1)),
                                  f_normals[:, faces_loc])

            # Compute the H_div-mass local matrix
            A = massHdiv(
                k.values[0:g.dim, 0:g.dim, c],
                c_centers[:, c],
                a[c] * g.cell_volumes[c],
                f_centers[:, faces_loc],
                a[c] * normals,
                np.ones(ndof),
                diams[c],
                weight[c],
            )[0]
            # Compute the Div local matrix
            B = -np.ones((ndof, 1))
            # Compute the hybrid local matrix
            C = np.eye(ndof, ndof)

            # Perform the static condensation to compute the hybrid local matrix
            invA = np.linalg.inv(A)
            S = 1 / np.dot(B.T, np.dot(invA, B))
            L = np.dot(np.dot(invA, np.dot(B, np.dot(S, B.T))), invA)
            L = np.dot(np.dot(C.T, L - invA), C)

            # Compute the local hybrid right-hand side using static condensation
            rhs[faces_loc] += np.dot(C.T,
                                     np.dot(invA,
                                            np.dot(B, np.dot(S, f[c]))))[:, 0]

            # Save values for hybrid matrix
            cols = np.tile(faces_loc, (faces_loc.size, 1))
            loc_idx = slice(idx, idx + cols.size)
            I[loc_idx] = cols.T.ravel()
            J[loc_idx] = cols.ravel()
            data[loc_idx] = L.ravel()
            idx += cols.size

        # construct the global matrices
        H = sps.coo_matrix((data, (I, J))).tocsr()

        # Apply the boundary conditions
        if bc is not None:

            if np.any(bc.is_dir):
                norm = sps.linalg.norm(H, np.inf)
                is_dir = np.where(bc.is_dir)[0]

                H[is_dir, :] *= 0
                H[is_dir, is_dir] = norm
                rhs[is_dir] = norm * bc_val[is_dir]

            if np.any(bc.is_neu):
                faces, _, sgn = sps.find(g.cell_faces)
                sgn = sgn[np.unique(faces, return_index=True)[1]]

                is_neu = np.where(bc.is_neu)[0]
                rhs[is_neu] += sgn[is_neu] * bc_val[is_neu] * g.face_areas[
                    is_neu]

        return H, rhs
Example #54
0
    def _identity_phi(self) -> csc_matrix:
        r"""
        Identity operator acting only on the `\phi` Hilbert subspace.
        """
        pt_count = self.grid.pt_count
        return sparse.identity(pt_count, format="csc")
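Subspace identities like this are typically combined with `sparse.kron` to promote an operator acting on one subspace to the full phi (x) theta Hilbert space; a sketch with hypothetical names (not this class's actual API):

import scipy.sparse as sparse

def promote_phi_operator(op_phi, dim_theta):
    # Act with op_phi on the phi subspace, trivially on the theta subspace.
    return sparse.kron(op_phi, sparse.identity(dim_theta, format="csc"),
                       format="csc")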
Example #55
0
    def sandwich(self, other, shift=None, **call_kwargs):
        """Sandwich operator between matrix exponentials.

		Let the matrix exponential object be :math:`\\exp(\\mathcal{O})` and let the operator to be sandwiched be
		:math:`C`. Then this function implements:

		.. math::
			\\exp(\\mathcal{O})^\\dagger C \\exp(\\mathcal{O})

		Notes
		-----
		The matrix exponential to multiply :math:`C` from the left is hermitian conjugated.

		Parameters
		-----------
		other : obj
			The operator :math:`C` to be sandwiched by the matrix exponentials :math:`\\exp(\\mathcal{O})^\\dagger`
			and :math:`\\exp(\\mathcal{O})`.
		shift : scalar
			Shifts the operator to be exponentiated by a constant `shift` times the identity matrix: :math:`\\exp(\\mathcal{O} - \\mathrm{shift}\\times\\mathrm{Id})`.
		call_kwargs : obj, optional
			extra keyword arguments which include:
				**time** (*scalar*) - if the operator `O` to be exponentiated is a `hamiltonian` object.
				**pars** (*dict*) - if the operator `O` to be exponentiated is a `quantum_operator` object.
		
		Returns
		--------
		obj
			operator `other` sandwiched between matrix exponential `exp_op` and its hermitian conjugate.

		Examples
		---------
		>>> expO = exp_op(O,a=1j)
		>>> A = exp_op(O.H)
		>>> print(expO.sandwich(A))
		
		"""
        is_ham = False
        if hamiltonian_core.ishamiltonian(other):
            shape = other._shape
            is_ham = True
        elif _sp.issparse(other):
            shape = other.shape
        elif other.__class__ in [_np.matrix, _np.ndarray]:
            shape = other.shape
        else:
            other = _np.asanyarray(other)
            shape = other.shape

        if other.ndim != 2:
            raise ValueError("Expecting a 2 dimensional array for 'other'")

        if shape[0] != shape[1]:
            raise ValueError("Expecting square array for 'other'")

        if shape[0] != self.get_shape[0]:
            raise ValueError(
                "Dimension mismatch between expO: {0} and other: {1}".format(
                    self.get_shape, other.shape))

        if shift is not None:
            M = self._a.conjugate() * (
                self.O.H(**call_kwargs) +
                shift * _sp.identity(self.Ns, dtype=self.O.dtype))
        else:
            M = self._a.conjugate() * self.O.H(**call_kwargs)

        if self._iterate:

            if is_ham:
                mat_iter = _hamiltonian_iter_sandwich(M, other, self._step,
                                                      self._grid)
            else:
                mat_iter = _iter_sandwich(M, other, self._step, self._grid)

            return mat_iter
        else:
            if self._grid is None and self._step is None:

                other = self.dot(other, **call_kwargs)
                other = self.H.rdot(other, **call_kwargs)
                return other

            else:
                if is_ham:
                    mat_iter = _hamiltonian_iter_sandwich(
                        M, other, self._step, self._grid)
                    return _np.asarray([mat for mat in mat_iter])
                else:
                    mat_iter = _iter_sandwich(M, other, self._step, self._grid)
                    return _np.asarray([mat for mat in mat_iter]).transpose(
                        (1, 2, 0))
Example #56
0
    def _fastInnerProductDeriv(
        self, projection_type, model, invert_model=False, invert_matrix=False
    ):
        """

        Parameters
        ----------

        projection_type : str
            'E' or 'F'

        tensorType : TensorType
            type of the tensor

        invert_model : bool
            inverts the material property

        invert_matrix : bool
            inverts the matrix


        Returns
        -------
        function
            dMdmu, the derivative of the inner product matrix

        """

        projection_type = projection_type[0].upper()
        if projection_type not in ["F", "E"]:
            raise ValueError("projection_type must be 'F' for faces or 'E' for edges")

        tensorType = TensorType(self, model)

        dMdprop = None

        if invert_matrix or invert_model:
            MI = self._fastInnerProduct(
                projection_type,
                model,
                invert_model=invert_model,
                invert_matrix=invert_matrix,
            )

        # number of elements we are averaging (equals dim for regular
        # meshes, but for cyl, where we use symmetry, it is 1 for edge
        # variables and 2 for face variables)
        if self._meshType == "CYL":
            shape = getattr(self, "vn" + projection_type)
            n_elements = sum([1 if x != 0 else 0 for x in shape])
        else:
            n_elements = self.dim

        if tensorType == 0:  # isotropic, constant
            Av = getattr(self, "ave" + projection_type + "2CC")
            V = sdiag(self.cell_volumes)
            ones = sp.csr_matrix(
                (np.ones(self.nC), (range(self.nC), np.zeros(self.nC))),
                shape=(self.nC, 1),
            )
            if not invert_matrix and not invert_model:
                dMdprop = n_elements * Av.T * V * ones
            elif invert_matrix and invert_model:
                dMdprop = n_elements * (
                    sdiag(MI.diagonal() ** 2)
                    * Av.T
                    * V
                    * ones
                    * sdiag(1.0 / model ** 2)
                )
            elif invert_model:
                dMdprop = n_elements * Av.T * V * sdiag(-1.0 / model ** 2)
            elif invert_matrix:
                dMdprop = n_elements * (sdiag(-MI.diagonal() ** 2) * Av.T * V)

        elif tensorType == 1:  # isotropic, variable in space
            Av = getattr(self, "ave" + projection_type + "2CC")
            V = sdiag(self.cell_volumes)
            if not invert_matrix and not invert_model:
                dMdprop = n_elements * Av.T * V
            elif invert_matrix and invert_model:
                dMdprop = n_elements * (
                    sdiag(MI.diagonal() ** 2) * Av.T * V * sdiag(1.0 / model ** 2)
                )
            elif invert_model:
                dMdprop = n_elements * Av.T * V * sdiag(-1.0 / model ** 2)
            elif invert_matrix:
                dMdprop = n_elements * (sdiag(-MI.diagonal() ** 2) * Av.T * V)

        elif tensorType == 2:  # anisotropic
            Av = getattr(self, "ave" + projection_type + "2CCV")
            V = sp.kron(sp.identity(self.dim), sdiag(self.cell_volumes))

            if self._meshType == "CYL":
                Zero = sp.csr_matrix((self.nC, self.nC))
                Eye = sp.eye(self.nC)
                if projection_type == "E":
                    P = sp.hstack([Zero, Eye, Zero])
                    # print(P.todense())
                elif projection_type == "F":
                    P = sp.vstack(
                        [sp.hstack([Eye, Zero, Zero]), sp.hstack([Zero, Zero, Eye])]
                    )
                    # print(P.todense())
            else:
                P = sp.eye(self.nC * self.dim)

            if not invert_matrix and not invert_model:
                dMdprop = Av.T * P * V
            elif invert_matrix and invert_model:
                dMdprop = (
                    sdiag(MI.diagonal() ** 2) * Av.T * P * V * sdiag(1.0 / model ** 2)
                )
            elif invert_model:
                dMdprop = Av.T * P * V * sdiag(-1.0 / model ** 2)
            elif invert_matrix:
                dMdprop = sdiag(-MI.diagonal() ** 2) * Av.T * P * V

        if dMdprop is not None:

            def innerProductDeriv(v=None):
                if v is None:
                    warnings.warn(
                        "Deprecation warning: TensorMesh.innerProductDeriv."
                        " You should be supplying a vector. "
                        "Use: sdiag(u)*dMdprop",
                        DeprecationWarning,
                    )
                    return dMdprop
                return sdiag(v) * dMdprop

            return innerProductDeriv
        else:
            return None
Example #57
0
model_str = FLAGS.model
dataset_str = FLAGS.dataset

# Load data
adj, features = load_data(dataset_str)

# Store original adjacency matrix (without diagonal entries) for later
adj_orig = adj
adj_orig = adj_orig - sp.dia_matrix((adj_orig.diagonal()[np.newaxis, :], [0]), shape=adj_orig.shape)
adj_orig.eliminate_zeros()

adj_train, train_edges, val_edges, val_edges_false, test_edges, test_edges_false = mask_test_edges(adj)
adj = adj_train

if FLAGS.features == 0:
    features = sp.identity(features.shape[0])  # featureless

# Some preprocessing
adj_norm = preprocess_graph(adj)

# Define placeholders
placeholders = {
    'features': tf.sparse_placeholder(tf.float32),
    'adj': tf.sparse_placeholder(tf.float32),
    'adj_orig': tf.sparse_placeholder(tf.float32),
    'dropout': tf.placeholder_with_default(0., shape=())
}

# Graph attributes
num_nodes = adj.shape[0]
features = sparse_to_tuple(features.tocoo())
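`preprocess_graph` here is the usual GCN-style symmetric normalization; a common sketch (an assumption about the helper, reusing `sparse_to_tuple` from this example):

import numpy as np
import scipy.sparse as sp

def preprocess_graph(adj):
    # D^{-1/2} (A + I) D^{-1/2}, returned in tuple form for the placeholders.
    adj_ = sp.coo_matrix(adj) + sp.identity(adj.shape[0])
    rowsum = np.asarray(adj_.sum(1)).flatten()
    d_inv_sqrt = sp.diags(np.power(rowsum, -0.5))
    return sparse_to_tuple((d_inv_sqrt @ adj_ @ d_inv_sqrt).tocoo())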
Example #58
0
    def _build_coupled_matrices(self, problem, names, cacheid=None):

        index_dict = {}
        for axis, basis in enumerate(self.domain.bases):
            if basis.separable:
                index_dict['n' + basis.name] = self.global_index[axis]

        index = self.global_index
        # Find applicable equations
        selected_eqs = [
            eq for eq in problem.eqs if eval(eq['raw_condition'], index_dict)
        ]
        selected_bcs = [
            bc for bc in problem.bcs if eval(bc['raw_condition'], index_dict)
        ]
        ndiff = sum(eq['differential'] for eq in selected_eqs)
        # Check selections
        nvars = problem.nvars
        neqs = len(selected_eqs)
        nbcs = len(selected_bcs)
        if neqs != nvars:
            raise ValueError(
                "Pencil {} has {} equations for {} variables.".format(
                    index, neqs, nvars))
        if nbcs != ndiff:
            raise ValueError(
                "Pencil {} has {} boundary conditions for {} differential equations."
                .format(index, nbcs, ndiff))
        Neqs = len(problem.eqs)
        Nbcs = len(problem.bcs)

        zbasis = self.domain.bases[-1]
        zsize = zbasis.coeff_size
        zdtype = zbasis.coeff_dtype
        compound = hasattr(zbasis, 'subbases')
        self.dirichlet = dirichlet = any(
            problem.meta[:][zbasis.name]['dirichlet'])

        # Identity
        Identity = sparse.identity(zsize, dtype=zdtype).tocsr()
        Zero = sparse.csr_matrix((zsize, zsize), dtype=zdtype)

        # Basis matrices (built from the coupled z basis)
        Ra = Rd = Identity
        if dirichlet:
            Rd = zbasis.PrefixBoundary
        if ndiff:
            P = zbasis.Precondition
            Fb = zbasis.FilterBoundaryRow
            Cb = zbasis.ConstantToBoundary
            Rd_Fb_P = Rd * Fb * P
            Rd_Cb = Rd * Cb
        if compound:
            Fm = zbasis.FilterMatchRows
            M = zbasis.Match
            Ra_Fm = Ra * Fm
        if ndiff and compound:
            Rd_Fm_Fb_P = Rd * Fm * Fb * P

        # Pencil matrices
        G_eq = sparse.csr_matrix((zsize * nvars, zsize * Neqs), dtype=zdtype)
        G_bc = sparse.csr_matrix((zsize * nvars, zsize * Nbcs), dtype=zdtype)
        C = lambda: sparse.csr_matrix(
            (zsize * nvars, zsize * nvars), dtype=zdtype)
        LHS = {name: C() for name in names}

        # Kronecker stencils
        δG_eq = np.zeros([nvars, Neqs])
        δG_bc = np.zeros([nvars, Nbcs])
        δC = np.zeros([nvars, nvars])

        # Use scipy sparse kronecker product with CSR output
        kron = partial(sparse.kron, format='csr')

        # Build matrices
        bc_iter = iter(selected_bcs)
        for i, eq in enumerate(selected_eqs):

            differential = eq['differential']
            if differential:
                bc = next(bc_iter)

            # Build RHS equation process matrix
            if (not differential) and (not compound):
                Gi_eq = Ra
            elif (not differential) and compound:
                Gi_eq = Ra_Fm
            elif differential and (not compound):
                Gi_eq = Rd_Fb_P
            elif differential and compound:
                Gi_eq = Rd_Fm_Fb_P

            # Kronecker into system matrix
            e = problem.eqs.index(eq)
            δG_eq[i, e] = 1
            G_eq = G_eq + kron(Gi_eq, δG_eq)
            δG_eq[i, e] = 0

            if differential:
                # Build RHS BC process matrix
                Gi_bc = Rd_Cb
                # Kronecker into system matrix
                b = problem.bcs.index(bc)
                δG_bc[i, b] = 1
                G_bc = G_bc + kron(Gi_bc, δG_bc)
                δG_bc[i, b] = 0

            # Build LHS matrices
            for name in names:
                C = LHS[name]
                eq_expr, eq_vars = eq[name]
                if eq_expr != 0:
                    Ei = eq_expr.operator_dict(self.global_index,
                                               eq_vars,
                                               cacheid=cacheid,
                                               **problem.ncc_kw)
                else:
                    Ei = defaultdict(int)
                if differential:
                    bc_expr, bc_vars = bc[name]
                    if bc_expr != 0:
                        Bi = bc_expr.operator_dict(self.global_index,
                                                   bc_vars,
                                                   cacheid=cacheid,
                                                   **problem.ncc_kw)
                    else:
                        Bi = defaultdict(int)
                for j in range(nvars):
                    # Build equation terms
                    Eij = Ei[eq_vars[j]]
                    if isinstance(Eij, int) and Eij == 0:
                        Eij = None
                    elif isinstance(Eij, int) and Eij == 1:
                        Eij = Gi_eq
                    else:
                        Eij = Gi_eq * Eij
                    # Build BC terms
                    if differential:
                        Bij = Bi[bc_vars[j]]
                        if isinstance(Bij, int) and Bij == 0:
                            Bij = None
                        elif isinstance(Bij, int) and Bij == 1:
                            Bij = Gi_bc
                        else:
                            Bij = Gi_bc * Bij
                    else:
                        Bij = None
                    # Combine equation and BC
                    if (Eij is None) and (Bij is None):
                        continue
                    elif Eij is None:
                        Cij = Bij
                    elif Bij is None:
                        Cij = Eij
                    else:
                        Cij = Eij + Bij
                    # Kronecker into system
                    δC[i, j] = 1
                    C = C + kron(Cij, δC)
                    δC[i, j] = 0
                LHS[name] = C

        if compound:
            # Add match terms
            L = LHS['L']
            δM = np.identity(nvars)
            L = L + kron(Ra * M, δM)
            LHS['L'] = L

        if dirichlet:
            # Build right-preconditioner for system
            δD = np.zeros([nvars, nvars])
            D = 0
            for i, var in enumerate(problem.variables):
                if problem.meta[var][zbasis.name]['dirichlet']:
                    Dii = zbasis.Dirichlet
                else:
                    Dii = Identity
                δD[i, i] = 1
                D = D + kron(Dii, δD)
                δD[i, i] = 0
            self.JD = D.tocsr()
            self.JD.eliminate_zeros()

        # Store minimum CSR matrices for fast dot products
        for name, matrix in LHS.items():
            matrix.eliminate_zeros()
            setattr(self, name, matrix.tocsr())

        # Apply Dirichlet recombination if applicable
        if dirichlet:
            for name in names:
                LHS[name] = LHS[name] * self.JD

        # Store expanded CSR matrices for fast combination
        self.LHS = zeros_with_pattern(*LHS.values()).tocsr()
        for name, matrix in LHS.items():
            matrix = expand_pattern(matrix, self.LHS)
            setattr(self, name + '_exp', matrix.tocsr())

        # Store operators for RHS
        G_eq.eliminate_zeros()
        self.G_eq = G_eq
        G_bc.eliminate_zeros()
        self.G_bc = G_bc
Example #59
0
def identity(n):
    return sparse.identity(n)
Example #60
0
    def _identity_theta(self) -> csc_matrix:
        r"""
        Identity operator acting only on the `\theta` Hilbert subspace.
        """
        dim_theta = 2 * self.ncut + 1
        return sparse.identity(dim_theta, format="csc")