def find_gb_local(theta,phi,system, fm_width, n_orbitals=2, delta=0.01, omega=0, topo=True):
	"""
	Find localized Gilbert Damping as function of theta,phi for individual atomic sites.
	"""
	n_layers = system.n_layers
	u_theta = np.array([[np.cos(theta/2), 1*np.sin(theta/2)],[-1*np.sin(theta/2),np.cos(theta/2)]], dtype=np.complex_)
	u_phi = np.array([[np.exp(1.j*phi/2),0],[0,np.exp(-1.j*phi/2)]], dtype=np.complex_)
	u = np.dot(u_theta,u_phi)
	u_dag = calc.hermitian(u)
	v = sparse.kron(np.identity(n_orbitals*system.sites),np.dot(u_dag,np.dot(np.array([[0,1],[0,0]], dtype=np.complex_),u))).toarray()
	v_array = np.zeros((n_layers, n_layers), dtype=np.complex_)	
	for num in range(fm_width):
		v_array[num,num] = 1
	v_array = sparse.kron(v_array,v).toarray()
	img = (system.greens_ret - calc.hermitian(system.greens_ret)) / 2.j
	system.update_energy(system.energy+omega)
	img_2 =(system.greens_ret - calc.hermitian(system.greens_ret)) / 2.j 
	d = (delta**2)*np.diag(np.dot(img,np.dot(v_array,np.dot(img_2,calc.hermitian(v_array)))))
	if topo:
		lgb = d[::4] + d[1::4] + d[2::4] + d[3::4]
		dos = np.diag(img)
		ldos_maj = dos[::4] + dos[2::4]
		ldos_min = dos[1::4] + dos[3::4]
	else:
		lgb = d[::2] + d[1::2]
		dos = np.diag(img)
		ldos_maj = dos[::2]
		ldos_min = dos[1::2]
	return [np.real(lgb),np.real(ldos_maj),np.real(ldos_min),np.real(dos)]
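The u_theta/u_phi pair above is a standard SU(2) rotation; a standalone sketch (hypothetical angles) checking that u is unitary and that conjugating sigma_+ preserves its norm:

import numpy as np

theta, phi = 0.7, 1.2  # hypothetical angles
u_theta = np.array([[np.cos(theta/2), np.sin(theta/2)],
                    [-np.sin(theta/2), np.cos(theta/2)]], dtype=complex)
u_phi = np.diag([np.exp(1j*phi/2), np.exp(-1j*phi/2)])
u = u_theta @ u_phi

sigma_plus = np.array([[0, 1], [0, 0]], dtype=complex)
v = u.conj().T @ sigma_plus @ u  # rotated raising operator, as in find_gb_local

print(np.allclose(u @ u.conj().T, np.eye(2)))  # True: u is unitary
print(np.isclose(np.linalg.norm(v), 1.0))      # True: the norm is preserved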
def mod_greens(system,tol,magmom=1,delta=0.02,boson_dist=0,max_iters=500,mixing=0.5):
	"""
	Iteratively update the retarded Green's function with a modified left
	self-energy, using linear mixing, until the change falls below tol.
	"""
	greens = system.greens_ret
	greens_list = []
	greens_list.append(greens)
	i = 1  # greens_list always holds the two most recent Green's functions
	iters = 0
	diff = 1000
	while diff > tol:
		iters += 1
		if iters >= max_iters:
			break
		greens_up = greens[::2,::2]
		img_1 = greens_up / 1.j
		greens_down = greens[1::2,1::2]
		img_2 = greens_down / 1.j
		img_1 = sparse.kron(img_1,np.array([[0,0],[0,1]]))
		img_2 = sparse.kron(img_2,np.array([[1,0],[0,0]]))
		img_comb = img_1 + img_2
		coeff = (1.j)*(delta**2)*(1/magmom)*(boson_dist + 0.5)
		sigma_l = system.sigma_l + coeff*img_comb
		greens_new = (mixing)*system.combine(system.hamil, system.e_array, sigma_l, system.sigma_r, system.sites, system.n_layers)+(1-mixing)* greens
		greens = greens_new
		greens_list.append(greens)
		diff = linalg.norm(greens_list[i]-greens_list[i-1])
		greens_list.pop(0)
	return greens
Example #3
    def __init__(self, fine_bp, coarse_bp, int_orders, rstr_orders, *args, **kwargs):
        """Initialization routine for transfer operators

        Args:
            fine_bp : BlockProblem on fine level
            coarse_bp : Blockproblem on coarse level
            int_orders (list of int): interpolation orders, one per layer
            rstr_orders (list of int): restriction orders, one per layer
            *args: Variable length argument list
            **kwargs: Arbitrary keyword arguments
        """

        assert isinstance(fine_bp, BlockProblemBase)
        assert isinstance(coarse_bp, BlockProblemBase)
        assert fine_bp.n_layer == len(int_orders) and coarse_bp.n_layer == len(rstr_orders)

        f_grids = map(lambda x: np.linspace(0, 1, x.shape[0]), fine_bp.As[0])
        c_grids = map(lambda x: np.linspace(0, 1, x.shape[0]), coarse_bp.As[0])

        intpl_matrices = map(lambda fg, cg, x: interpolation_matrix_1d(fg, cg, x),
                             f_grids, c_grids, int_orders)
        rstr_matrices = map(lambda fg, cg, x: 0.5 * interpolation_matrix_1d(fg, cg, x).transpose(),
                            f_grids, c_grids, rstr_orders)

        self.I_2htoh = reduce(lambda a, b: sp.kron(a, b, format='csc'), intpl_matrices)
        self.I_hto2h = reduce(lambda a, b: sp.kron(a, b, format='csc'), rstr_matrices)
        super(EquidistantBlockTransfer, self).__init__(fine_bp.ndofs, coarse_bp.ndofs, *args, **kwargs)
Example #4
import numpy as NY
import scipy.sparse as SS

def rand_Jacobian_matrix(n):
  # Kronecker sum of a random tridiagonal block B with the identity:
  # the sparsity pattern of a 2-D finite-difference Jacobian.
  I = SS.eye(n,n)
  data = NY.random.rand(3,n)
  B = SS.spdiags(data,[0,-1,1],n,n)
  A = SS.kron(B,I) + SS.kron(I,B)
  return A
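A quick usage sketch (hypothetical size):

A = rand_Jacobian_matrix(4)
print(A.shape)  # (16, 16)
print(A.nnz)    # stored entries of the Kronecker-sum sparsity pattern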
Example #5
def enlarge_block(block):
    """This function enlarges the provided Block by a single site, returning an
    EnlargedBlock.
    """
    mblock = block.basis_size
    o = block.operator_dict

    # Create the new operators for the enlarged block.  Our basis becomes a
    # Kronecker product of the Block basis and the single-site basis.  NOTE:
    # `kron` uses the tensor product convention making blocks of the second
    # array scaled by the first.  As such, we adopt this convention for
    # Kronecker products throughout the code.
    enlarged_operator_dict = {
        "H": kron(o["H"], identity(model_d)) + kron(identity(mblock), H1) + H2(o["conn_Sz"], o["conn_Sp"], Sz1, Sp1),
        "conn_Sz": kron(identity(mblock), Sz1),
        "conn_Sp": kron(identity(mblock), Sp1),
    }

    # This array keeps track of which sector each element of the new basis is
    # in.  `np.add.outer()` creates a matrix that adds each element of the
    # first vector with each element of the second, which when flattened
    # contains the sector of each basis element in the above Kronecker product.
    enlarged_basis_sector_array = np.add.outer(block.basis_sector_array, single_site_sectors).flatten()

    return EnlargedBlock(length=(block.length + 1),
                         basis_size=(block.basis_size * model_d),
                         operator_dict=enlarged_operator_dict,
                         basis_sector_array=enlarged_basis_sector_array)
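The NOTE above about the tensor product convention is easy to verify in isolation; a small standalone check:

import numpy as np

A = np.array([[1, 2], [3, 4]])
B = np.eye(2, dtype=int)
# kron(A, B) is the block matrix [[A[0,0]*B, A[0,1]*B], [A[1,0]*B, A[1,1]*B]]:
# blocks of the second array scaled by the first, as the comment says.
print(np.kron(A, B))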
Example #6
    def _K(self, g, beta, ifix=None):
        """ Returns the discretised integral operator
        """
        if ifix is None:
            ifix = self.ifix
    
        g = g.reshape(self.dim.R, self.dim.N).T        # set g as g_{nr}, n=1..N, r=1..R
        g = np.column_stack((np.ones(self.dim.N), g))  # append a col. of 1s to g

        # matrices A[r] = sum_d(b_rd * L_d)
        struct_mats = np.array([sum(brd*Ld for brd, Ld in zip(br, self.basis_mats))
                                for br in beta])
        # Add an axis for n=0,...,N-1
        At = struct_mats[None, :, :, :] * g[..., None, None]
        At = At.sum(1)

        W = weight_matrix(self.ttc, ifix) #self._weight_matrix
        K = sum(sparse.kron(sparse.kron(W[:, i][:, None],
                                        sparse.eye(1, self.dim.N, i)),
                            At[i, ...])
                for i in range(self.dim.N))
        I = sparse.eye(self.dim.K)

        # because x[ifix] is fixed by the integral transform
        # we need to add a column of identity matrices to K
        # - could do this by assignment
        eifix = sparse.eye(1, self.dim.N, ifix)
        K += sum(sparse.kron(sparse.kron(sparse.eye(1, self.dim.N, i).transpose(),
                                         eifix),
                             I)
                 for i in range(self.dim.N))
        return K
Example #7
 def _laplace(self):
   '''
   construct Laplace operator as a matrix
   based on user input filter.
   Actually this matrix can be thought
   as a convolution operator:
   f(x,z)*U(x,z)
   '''
   f=[-1.,2.,-1.]
   nx = nz = self.n
   nf = len(f)
   nonzero = np.ones((nf,nx))
   for i in range(nf):
     nonzero[i] *=f[i]
   offsets = array(range(nf//2, -(nf//2) - 1, -1))
 
   m1 = dia_matrix((nonzero,offsets),shape=(nx,nx))
   m2 = identity(nz)
   k1 = kron(m1,m2)
   nonzero = np.ones((nf,nz))
   for i in range(nf):
     nonzero[i,:] *=f[i]
   m1 = dia_matrix((nonzero,offsets),shape=(nz,nz))
   m2 = identity(nx)
   k2 = kron(m2,m1)   
   return k1+ k2
Example #8
def _conserved_get_proj(p_basis,dtype):
	np_min = p_basis._n.min()
	np_max = p_basis._n.max()
	v_ph = _np.zeros((np_max+1,1),dtype=_np.int32)

	proj_1 = p_basis._b1.get_proj(dtype).tocsc()
	proj_1_mask = _sp.csc_matrix(proj_1.shape,dtype=dtype)

	v_ph[np_min] = 1
	mask = p_basis._n == np_min
	proj_1_mask[:,mask] = proj_1[:,mask]

	proj_1_full = _sp.kron(proj_1_mask,v_ph,format="csr")

	proj_1_mask[:,mask]=0.0
	proj_1_mask.eliminate_zeros()
	v_ph[np_min] = 0


	for np in range(np_min+1,np_max+1,1):
		v_ph[np] = 1
		mask = p_basis._n == np
		proj_1_mask[:,mask] = proj_1[:,mask]

		proj_1_full = proj_1_full + _sp.kron(proj_1_mask,v_ph,format="csr")

		proj_1_mask[:,mask]=0.0
		proj_1_mask.eliminate_zeros()
		v_ph[np] = 0

	return proj_1_full
Example #9
	def __init__(self, Y, a, b, c, d, H, q=None):
		'''
		Q_{i,j} = a*(y_i*y_j)^3+b*(y_i*y_j)^2+c*(y_i+y_j) + d - h_i*h_j
		q = q
		'''
		super(AMF_deg3_BQP, self).__init__()
		n, l =  Y.shape
		self.a = a
		self.b = b
		self.c = c
		self.d = d
		self.Y1 = Y
		self.H = H # mostly updated
		if q is None:
			self.q = np.zeros(n)
		else:
			self.q = q
		# consider 2nd and 3rd powers of Y
		Y2_tmp = []
		Y3_tmp = []
		for i in range(n):
			y = Y.getrow(i)
			Y2_tmp.append(kron(y, y))
			Y3_tmp.append(kron(kron(y, y), y))
		self.Y2 = vstack(Y2_tmp).tocsr()
		self.Y3 = vstack(Y3_tmp).tocsr()
Example #10
def pauli_x(n, N):
    '''compute the pauli_x operator acting on the n^th spin of N'''

    px = Pauli['x']
    e1, e2 = sp.eye(2**(n-1)), sp.eye(2**(N-n))

    return sp.kron(e1, sp.kron(px, e2))
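For instance, with n = 1 and N = 2 the operator reduces to sx ⊗ I; a hedged standalone check (assuming Pauli['x'] is the usual 2x2 matrix):

import numpy as np
import scipy.sparse as sp

sx = np.array([[0, 1], [1, 0]])  # assumed value of Pauli['x']
op = sp.kron(sp.eye(1), sp.kron(sx, sp.eye(2)))  # pauli_x(1, 2) spelled out
print(np.allclose(op.toarray(), np.kron(sx, np.eye(2))))  # True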
Example #11
    def vectorize(self, dependency_graph):
        nodes = dependency_graph.nodes

        if self.operator == 'head':
            assert len(nodes[0]['deps']) == 1

            head_address, = nodes[0]['deps']['ROOT']
            return self.node_to_vector(nodes[head_address])

        elif self.operator in ('add', 'mult'):
            tokens = tuple((node['lemma'], node['tag']) for node in nodes.values() if node['address'])
            return getattr(self.space, self.operator)(*tokens)

        elif self.operator in self.transitive_operators:
            assert graph_signature(dependency_graph) == transitive_sentence(self.tagset)

            subject = self.node_to_vector(nodes[1])
            verb = self.node_to_vector(nodes[2])
            object_ = self.node_to_vector(nodes[3])

            if self.operator == 'kron':
                verb_matrix = sparse.kron(verb, verb, format='csr')
                subject_object = sparse.kron(subject, object_, format='csr')

                return verb_matrix.multiply(subject_object)

            else:
                raise NotImplementedError('Operator {} is not implemented'.format(self.operator))
        else:
            raise ValueError('Operator {} is not supported'.format(self.operator))
Example #12
def die(first_noun, second_noun, trans_verb):
    """Vectorize a sentence with 'noun die noun verb' = (sub, obj)."""
    noun_model = space.words.polyglot_model()
    noun_space = noun_model[0]

    die_vector = compose.train.die_cat_stored()
    ver_vector = compose.train.verb(trans_verb, noun_model)

    fst_vector = noun_space[first_noun]
    snd_vector = noun_space[second_noun]

    par_vector_sub = kron(
        csr_matrix(snd_vector), csr_matrix(ver_vector))
    par_vector_obj = kron(
        csr_matrix(snd_vector), numpy.transpose(csr_matrix(ver_vector)))

    par_vector_sub = kron(
        numpy.transpose(csr_matrix(fst_vector)), csr_matrix(par_vector_sub))
    par_vector_obj = kron(
        numpy.transpose(csr_matrix(fst_vector)), csr_matrix(par_vector_obj))

    vector_sub = csr_matrix(die_vector).multiply(par_vector_sub)
    vector_obj = csr_matrix(die_vector).multiply(par_vector_obj)

    return (vector_sub.toarray().flatten(), vector_obj.toarray().flatten())
Example #13
def get_preconditioner():
    """Compute the preconditioner M"""
    diags_x = zeros((3, nx))
    diags_x[0,:] = 1/hx/hx
    diags_x[1,:] = -2/hx/hx
    diags_x[2,:] = 1/hx/hx
    Lx = spdiags(diags_x, [-1,0,1], nx, nx)

    diags_y = zeros((3, ny))
    diags_y[0,:] = 1/hy/hy
    diags_y[1,:] = -2/hy/hy
    diags_y[2,:] = 1/hy/hy
    Ly = spdiags(diags_y, [-1,0,1], ny, ny)

    J1 = kron(Lx, eye(ny)) + kron(eye(nx), Ly)

    # Now we have the matrix `J_1`. We need to find its inverse `M` --
    # however, since an approximate inverse is enough, we can use
    # the *incomplete LU* decomposition

    J1_ilu = spilu(J1)

    # This returns an object with a method .solve() that evaluates
    # the corresponding matrix-vector product. We need to wrap it into
    # a LinearOperator before it can be passed to the Krylov methods:

    M = LinearOperator(shape=(nx*ny, nx*ny), matvec=J1_ilu.solve)
    return M
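A self-contained sketch of the same ILU-as-LinearOperator pattern on a hypothetical 8x8 grid (the names nx, hx, L1 are local to this sketch):

import numpy as np
from scipy.sparse import spdiags, kron, eye, csc_matrix
from scipy.sparse.linalg import spilu, LinearOperator, gmres

nx = ny = 8
hx = 1.0 / (nx - 1)

diags = np.zeros((3, nx))
diags[0, :] = diags[2, :] = 1 / hx / hx
diags[1, :] = -2 / hx / hx
L1 = spdiags(diags, [-1, 0, 1], nx, nx)
J1 = kron(L1, eye(ny)) + kron(eye(nx), L1)

J1_ilu = spilu(csc_matrix(J1))  # incomplete LU factorization wants CSC
M = LinearOperator(shape=(nx * ny, nx * ny), matvec=J1_ilu.solve)

x, info = gmres(J1, np.ones(nx * ny), M=M)  # ILU-preconditioned Krylov solve
print(info)  # 0 on convergence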
Example #14
 def fget(self):
     if(self._nodalLaplacian is None):
          print('Warning: Laplacian has not been tested rigorously.')
         # The number of cell centers in each direction
         n = self.vnC
         # Compute divergence operator on faces
         if(self.dim == 1):
              D1 = sdiag(1./self.hx) * ddx(n[0])
             L  = - D1.T*D1
         elif(self.dim == 2):
             D1 = sdiag(1./self.hx) * ddx(n[0])
             D2 = sdiag(1./self.hy) * ddx(n[1])
             L1 = sp.kron(speye(n[1]+1), - D1.T * D1)
             L2 = sp.kron(- D2.T * D2, speye(n[0]+1))
             L  = L1 + L2
         elif(self.dim == 3):
             D1 = sdiag(1./self.hx) * ddx(n[0])
             D2 = sdiag(1./self.hy) * ddx(n[1])
             D3 = sdiag(1./self.hz) * ddx(n[2])
             L1 = kron3(speye(n[2]+1), speye(n[1]+1), - D1.T * D1)
             L2 = kron3(speye(n[2]+1), - D2.T * D2, speye(n[0]+1))
             L3 = kron3(- D3.T * D3, speye(n[1]+1), speye(n[0]+1))
             L  = L1 + L2 + L3
         self._nodalLaplacian = L
     return self._nodalLaplacian
Example #15
def srmax_loop(D, env, w, track, damping=0.001, rmax = 1.0):
    """
    Sparse rmax loop.
    """
    k = len(w)
    A = sp.identity(k,format='csr') * damping
    b = sp_create(k,1,'csr')
    grmax = rmax / (1.0 - env.gamma)

    for (s,a,r,ns,na) in D:
        if track.known_pair(s,a) and track.known_state(ns):
            features = env.phi(s, a, sparse=True, format='csr')
            next_action = env.linear_policy(w, ns)
            newfeatures = env.phi(ns, next_action, sparse=True, format='csr')
            nf = features - env.gamma * newfeatures
            T = sp.kron(features, nf.T)
            A = A + T
            b = b + features * r 
        elif track.known_pair(s,a):
            features = env.phi(s, a, sparse=True, format='csr')
            T = sp.kron(features, features.T)
            A = A + T
            b = b + features * (r + env.gamma * grmax)
        else:            
            features = env.phi(s, a, sparse=True, format='csr')
            T = sp.kron(features, features.T)
            A = A + T
            b = b + features * grmax
        for una in track.unknown(s):
            features = env.phi(s, una, sparse=True, format='csr')
            T = sp.kron(features, features.T)
            A = A + T
            b = b + features * grmax

    return A,b
Example #16
    def __init__(self,bksize,numbk):
   
        n = 2*bksize*numbk      # Degrees of freedom per dimension  
        
        self.L = neglap2d(n)

        T = spdiags(np.ones((2,n)),np.array([0,1]),n,n) # Matrix for overlaps
    
        Id = np.identity(4)
        
        Esz = np.ones((bksize,bksize))     # Template for size of blocks
        Enm = np.ones((numbk,numbk))       # Template for number of blocks
        
        # Four colors of subdomain 
        self.colors = ('blue','yellow','red','green')   

        self.dex = []     # Indices of colored subdomains
        self.S = []       # Operator on each subdomain

        for k in range(4):  # Loop over four colors of subdomains

            # Determine color of subdomains associated with grid points
            q = csr_matrix(np.reshape(Id[:,k],(2,2)))
            mat = T*kron(kron(Enm,q),Esz)*T
            row,col = mat.nonzero()
            
            self.dex.append(n*row+col)               
            self.S.append(self.L[:,self.dex[k]][self.dex[k],:])     
Example #17
def self_energy(E, H, t, sigma_0, a, iterations,N):
	"""
	Finds the self-energy at a given supercell by using a mixing
	coefficient. More iterations will give a more accurate sigma (self-energy)
	value, although there may not be convergence in all cases.
	"""
	t_array = sparse.kron(np.identity(N), t).toarray()
	t_dagger_array = hermitian(t_array)
	e_array = sparse.kron(np.identity(H.shape[0]), E).toarray()	
	sigma = sparse.kron(np.eye(H.shape[0], dtype=np.complex_),sigma_0).toarray()
	tol = 1e-7
	sigmas = []
	sigmas.append(sigma)
	i = 1  # sigmas always holds the two most recent iterates
	iters = 0
	diff = 1000
	while diff>tol:
		sigma = (sigma * a) + np.dot(t_dagger_array,np.dot(linalg.inv(e_array - H - sigma),t_array)) * (1 - a)
		sigmas.append(sigma)
		iters += 1
		diff = linalg.norm(np.absolute(sigmas[i]-sigmas[i-1]))
		if iters > iterations:
			break
		sigmas.pop(0)
	return sigma
Example #18
def sparse_cH(terms, ldim=2):
    """Construct a sparse cyclic nearest-neighbour Hamiltonian

    :param terms: List of nearst-neighbour terms (square array or MPO,
        see return value of :func:`cXY_local_terms`)
    :param ldim: Local dimension

    :returns: The Hamiltonian as sparse matrix

    """
    H = 0
    N = len(terms)
    for pos, term in enumerate(terms[:-1]):
        if hasattr(term, 'lt'):
            # Convert MPO to regular matrix
            term = term.to_array_global().reshape((ldim**2, ldim**2))
        left = sp.eye(ldim**pos)
        right = sp.eye(ldim**(N - pos - 2))
        H += sp.kron(left, sp.kron(term, right))
    # The last term acts on the first and last site.
    cyc = terms[-1]
    middle = sp.eye(ldim**pos)  # pos == N - 2 after the loop: identity on sites 1..N-2
    for i in range(cyc.ranks[0]):
        H += sp.kron(cyc.lt[0][0, ..., i], sp.kron(middle, cyc.lt[1][i, ..., 0]))
    return H
Example #19
def gen_gamma(N):
    '''compute sum of tunneling term for a single zone'''
    
    if N==1:
        return sx
    
    return sp.kron(gen_gamma(N-1), eye) + sp.kron(sp.eye(2**(N-1)), sx)
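A hedged check of the recursion, assuming sx and eye are the 2x2 Pauli-x and identity used above: gen_gamma(2) should equal sx ⊗ I + I ⊗ sx.

import numpy as np
import scipy.sparse as sp

sx = np.array([[0, 1], [1, 0]])  # assumed single-spin tunneling term
eye = np.eye(2)                  # assumed 2x2 identity used by gen_gamma

expected = np.kron(sx, eye) + np.kron(eye, sx)
print(np.allclose(gen_gamma(2).toarray(), expected))  # True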
Example #20
def construct_hams(hs, Js, Jxs):
    '''Construct static Hamiltonians'''
    
    Nz = len(hs)     # number of zones
    
    H0s = [construct_H0(hs[i], Js[i]) for i in range(Nz)]
    Gammas = [gen_gamma(len(hs[i])) for i in range(Nz)]

    M = [H.size for H in H0s]   # number of states per zone
    C = [1]+list(np.cumprod(M))     # cumulative product of H0 sizes

    # pad single zone Hamiltonians (integer division keeps np.repeat happy)
    for i in range(Nz):
        H0s[i] = np.tile(np.repeat(H0s[i], C[-1]//C[i+1]), [1, C[i]])
        Gammas[i] = sp.kron(sp.eye(C[i]), sp.kron(Gammas[i], sp.eye(C[-1]//C[i+1])))
    
    # only need sum of H0s
    H0 = np.sum(np.array(H0s), axis=0)
    
    # add zone-zone interactions to H0
    for i,j in Jxs:
        Jx = Jxs[(i,j)]
        Hx = np.zeros([1, M[i]*M[j]], dtype=float)
        for n,m in zip(*Jx.nonzero()):
            Hx += Jx[n, m]*np.kron(pauli_z(n+1, Jx.shape[0]),
                                   pauli_z(m+1, Jx.shape[1]))
        if np.any(Hx):
            H0 += np.tile(np.repeat(Hx, C[i]), [1, C[-1]//C[j+1]])
    
    H0 = sp.diags(H0, [0])
    return H0, Gammas
Example #21
	def __init__(self,lhgen,rhgen,target_sector=0.,joint=True):
		self.lhgen=deepcopy(lhgen)
		self.rhgen=deepcopy(rhgen)
		self.L=self.lhgen.l+self.rhgen.l
		self.H=kron(self.lhgen.H,identity(self.rhgen.D))+kron(identity(self.lhgen.D),self.rhgen.H)
		if joint==True:
			for lpterm in self.lhgen.pterms:
				for rpterm in self.rhgen.pterms:
					if lpterm.label==rpterm.label: #label must include all the important information
						self.H=self.H+kron(lpterm.current_op.mat,rpterm.current_op.mat)*lpterm.param

		self.target_sector=target_sector
		self.sector_indices={}
		self.rsector_indices={}
		self.restricted_basis_indices=[]
		for sys_sec,sys_basis_states in self.lhgen.basis_by_sector.items():
			self.sector_indices[sys_sec]=[]
			env_sec=target_sector-sys_sec
			if env_sec in self.rhgen.basis_by_sector:
				self.rsector_indices[env_sec]=[]
				for i in sys_basis_states:
					i_offset=self.rhgen.D*i
					for j in self.rhgen.basis_by_sector[env_sec]:
						current_index=len(self.restricted_basis_indices)
						self.sector_indices[sys_sec].append(current_index)
						self.rsector_indices[env_sec].append(current_index)
						self.restricted_basis_indices.append(i_offset+j)
		self.restricted_superblock_hamiltonian=self.H.todense()[:,self.restricted_basis_indices][self.restricted_basis_indices,:]
Example #22
def randomModel(shape, seed=None, anisotropy=None, its=100, bounds=None):
    """
        Create a random model by convolving a kernel with a
        uniformly distributed model.

        :param tuple shape: shape of the model.
        :param int seed: pick which model to produce, prints the seed if you don't choose.
        :param numpy.ndarray anisotropy: this is the (3 x n) blurring kernel that is used.
        :param int its: number of smoothing iterations
        :param list bounds: bounds on the model, len(list) == 2
        :rtype: numpy.ndarray
        :return: M, the model


        .. plot::

            import matplotlib.pyplot as plt
            import SimPEG.Utils.ModelBuilder as MB
            plt.colorbar(plt.imshow(MB.randomModel((50,50),bounds=[-4,0])))
            plt.title('A very cool, yet completely random model.')
            plt.show()


    """
    if bounds is None:
        bounds = [0,1]

    if seed is None:
        seed = np.random.randint(1e3)
        print('Using a seed of:', seed)

    if isinstance(shape, (int, float)):
        shape = (shape,) # make it a tuple for consistency

    np.random.seed(seed)
    mr = np.random.rand(*shape)
    if anisotropy is None:
        if len(shape) == 1:
            smth = np.array([1,10.,1],dtype=float)
        elif len(shape) == 2:
            smth = np.array([[1,7,1],[2,10,2],[1,7,1]],dtype=float)
        elif len(shape) == 3:
            kernel = np.array([1,4,1], dtype=float).reshape((1,3))
            smth = np.array(sp.kron(sp.kron(kernel,kernel.T).todense()[:],kernel).todense()).reshape((3,3,3))
    else:
        assert len(anisotropy.shape) == len(shape), 'Anisotropy must be the same shape.'
        smth = np.array(anisotropy,dtype=float)

    smth = smth/smth.sum() # normalize
    mi = mr
    for i in range(its):
        mi = ndi.convolve(mi, smth)

    # scale the model to live between the bounds.
    mi = (mi - mi.min())/(mi.max()-mi.min()) # scaled between 0 and 1
    mi = mi*(bounds[1]-bounds[0])+bounds[0]


    return mi
Example #23
def sfermion(L):
	H = np.zeros((4,4))
	d = 1
	for i in range(L-1):
		H = kron(H, identity(4)) \
			+ t*kron(kron(identity(d), Cp_up.dot(Zs)), Cm_up) \
			- t*kron(kron(identity(d), Cm_up.dot(Zs)), Cp_up) \
			+ t*kron(kron(identity(d), Cp_dn.dot(Zs)), Cm_dn) \
			- t*kron(kron(identity(d), Cm_dn.dot(Zs)), Cp_dn)
		d *= 4
	E0, psi0 = eigsh(H, k=1, which='SA')
	print(E0/L)
Example #24
 def __init__(self, transfer_list, *args, **kwargs):
     for t in transfer_list:
         assert hasattr(t, 'I_2htoh') and hasattr(t, 'I_hto2h')
     # Reduce over the matrices/sizes themselves; reducing over the transfer
     # objects would fail for more than two entries (a becomes a matrix).
     self.I_2htoh = reduce(lambda a, b: sp.kron(a, b, format='csc'), [t.I_2htoh for t in transfer_list])
     self.I_hto2h = reduce(lambda a, b: sp.kron(a, b, format='csc'), [t.I_hto2h for t in transfer_list])
     n_f = reduce(lambda a, b: a * b, [t.ndofs_fine for t in transfer_list])
     n_c = reduce(lambda a, b: a * b, [t.ndofs_coarse for t in transfer_list])
     super(CombinedBlockTransfer, self).__init__(n_f, n_c)
Example #25
def localop(Operator, site, chainlength, dimpersite):
    II = I(dimpersite)
    sitesbefore = site - 1
    sitesafter = chainlength - site

    O1 = bigkronecker(II, sitesbefore)
    O2 = bigkronecker(II, sitesafter)
    # Identity on the sites before and after, Operator on the target site.
    return sparse.kron(sparse.kron(O1, Operator), O2)
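A hedged sanity check with assumed helpers (I(d) as a d-dimensional identity and bigkronecker(M, k) as the k-fold Kronecker power of M, identity for k = 0):

import numpy as np
from scipy import sparse

def I(d):
    return sparse.identity(d)

def bigkronecker(M, k):
    out = sparse.identity(1)
    for _ in range(k):
        out = sparse.kron(out, M)
    return out

sz = np.diag([0.5, -0.5])
op = localop(sz, site=2, chainlength=3, dimpersite=2)
print(op.shape)  # (8, 8): sz acting on site 2 of a 3-site chain
print(np.allclose(op.toarray(), np.kron(np.eye(2), np.kron(sz, np.eye(2)))))  # True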
Example #26
 def H2(self, b1, b2):  # two-site part of H
     """Given the operator b on two sites in different Hilbert spaces
     (e.g. two blocks), returns a Kronecker product representing the
     corresponding two-site term in the Hamiltonian that joins the two
     sites.
     """
     return -self.t * (kron(b1, b2.conjugate().transpose()) +
                       kron(b1.conjugate().transpose(), b2))
Example #27
def BuildLaPoisson():
    """
    pour l'etape de projection
    matrice de Laplacien phi
    avec CL Neumann pour phi

    BUT condition de Neumann pour phi 
    ==> non unicite de la solution

    besoin de fixer la pression en un point 
    pour lever la degenerescence: ici [0][1]
    
    ==> need to build a correction matrix

    """
    ### ne pas prendre en compte les points fantome (-2)
    NXi = nx
    NYi = ny

    ###### Definition of the 1D Laplace operator

    ###### X AXIS
    ### Diagonal terms
    dataNXi = [numpy.ones(NXi), -2*numpy.ones(NXi), numpy.ones(NXi)]

    ### Boundary conditions: Neumann on the left, nothing on the right
    dataNXi[2][1]     = 2.  # SF left
    # dataNXi[0][NXi-2] = 2.  # SF right

    ###### Y AXIS
    ### Diagonal terms
    dataNYi = [numpy.ones(NYi), -2*numpy.ones(NYi), numpy.ones(NYi)]

    ### Boundary conditions: Neumann
    dataNYi[2][1]     = 2.  # SF low
    dataNYi[0][NYi-2] = 2.  # SF top

    ###### Their positions
    offsets = numpy.array([-1,0,1])                    
    DXX = sp.dia_matrix((dataNXi,offsets), shape=(NXi,NXi)) * dx_2
    DYY = sp.dia_matrix((dataNYi,offsets), shape=(NYi,NYi)) * dy_2
    
    ####### 2D Laplace operator
    LAP = sp.kron(sp.eye(NYi,NYi), DXX) + sp.kron(DYY, sp.eye(NXi,NXi))
    
    ####### BUILD CORRECTION MATRIX

    ### Upper diagonal terms
    dataNYNXi = [numpy.zeros(NYi*NXi)]
    offset = numpy.array([1])

    ### Pinning coefficient: 2+(-1) = 1 ==> Dirichlet at one point (recovers the Laplacian)
    ### WARNING, MULTIPLICATIVE COEFFICIENT: dx_2, for M(j,i) with j-NY, i-NX
    dataNYNXi[0][1] = -1 * dx_2

    LAP0 = sp.dia_matrix((dataNYNXi,offset), shape=(NYi*NXi,NYi*NXi))
  
    return LAP + LAP0
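A minimal standalone sketch (assumed 4x4 grid, unit spacing, Neumann at both ends) of why the correction matrix is needed: the pure-Neumann operator has a one-dimensional null space, and pinning one entry as above restores full rank.

import numpy as np
import scipy.sparse as sp

n = 4
data = [np.ones(n), -2*np.ones(n), np.ones(n)]
data[2][1] = 2.0        # Neumann at the left end
data[0][n-2] = 2.0      # Neumann at the right end
D = sp.dia_matrix((data, [-1, 0, 1]), shape=(n, n))
LAP = sp.kron(sp.eye(n), D) + sp.kron(D, sp.eye(n))
print(np.linalg.matrix_rank(LAP.toarray()))   # 15: constants span the null space

corr = [np.zeros(n*n)]
corr[0][1] = -1.0                             # same pinning as dataNYNXi[0][1]
LAP0 = sp.dia_matrix((corr, [1]), shape=(n*n, n*n))
print(np.linalg.matrix_rank((LAP + LAP0).toarray()))  # 16: degeneracy lifted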
Example #28
def generate_plot():
	h_bar = 6.582E-16
	q = 1
	a = 1E-10
	t = 1
	c = 3.0E8
	g = -2.002
	N = 1
	E = -1
	Ez = 1000
	eta = 0.01 + (0.01)*1.j
	sigma_x = np.array([[0,1],[1,0]])
	sigma_y = np.array([[0, -1.j],[1.j,0]])
	kxs = []
	alphas = []
	stxs = []
	stys = []
	for kx in np.arange(0, 2*np.pi, 0.1):
		kxs.append(kx)
		kys = []
		alphas_row = []
		stxs_row = []
		stys_row = []
		for ky in np.arange(0, 2*np.pi, 0.1):
			coeff = (-1)*g*q*(1/(h_bar**2))*(a**2)*(t**2)*(1/(2*c**2))
			#print(coeff)
			hamil = sparse.kron(np.identity(2, dtype=np.complex_), t*(np.cos(kx)+np.cos(ky)))
			hamil += coeff*(np.cos(kx) + np.cos(ky))*(Ez*np.sin(ky)*sigma_x - Ez*np.sin(kx)*sigma_y)
			E_arr = sparse.kron(np.identity(2, dtype=np.complex_),E).toarray()
			greens = linalg.inv(E_arr - hamil - eta*np.identity(2))  # broadening on the diagonal
			img = (greens - calc.hermitian(greens))/(2.j)
			# img is Hermitian, so these traces are real; take .real for plotting
			stxs_row.append(np.real(np.trace(np.dot(img,sigma_x)))/2)
			stys_row.append(np.real(np.trace(np.dot(img,sigma_y)))/2)
			kys.append(ky)
			alpha = np.real(np.trace(img))/2
			alphas_row.append(alpha)
		#print(stxs_row)
		alphas.append(alphas_row)
		stxs.append(stxs_row)
		stys.append(stys_row)
		print(kx)
	print('loop over')	
	x, y = np.meshgrid(kxs, kys)
	print('here')
	#print(alphas)
	alphas = np.array(alphas)
	stxs = np.array(stxs)
	stys = np.array(stys)
	print(stxs)
	#print(alphas)
	#fig = plt.figure()
	plt.pcolormesh(x, y, alphas)
	#plt.pcolormesh(x,y,stxs)
	plt.quiver(x, y, stxs, stys, color='red', angles='xy', scale_units='xy', scale=1)
	#plt.quiver(x, y, stys, color='red', headlength=10)
	print('mesh complete')
	#plt.colorbar()
	plt.show()
Example #29
  def LaplacianSmooth(self, reference_mesh):
    # placing this down here for now in case people are having numpy/scipy problems
    import numpy
    import scipy.sparse as sparse
    import scipy.sparse.linalg

    num_vertices = len(self.vertices)
    num_boundary_vertices = len(self.boundary_vertices)
    num_non_boundary_vertices = num_vertices - num_boundary_vertices
    L = sparse.lil_matrix((num_vertices, num_vertices))
    C = sparse.lil_matrix((num_boundary_vertices, num_vertices))
    Cbar = sparse.lil_matrix((num_non_boundary_vertices, num_vertices))

    non_boundary_vertices_seen = 0
    boundary_vertices_seen = 0
    for i in range(num_vertices):
      if i in self.boundary_vertices:
        C[boundary_vertices_seen, i] = 1.0
        boundary_vertices_seen += 1
      else:
        Cbar[non_boundary_vertices_seen, i] = 1.0
        non_boundary_vertices_seen += 1
    assert (num_boundary_vertices == boundary_vertices_seen)
    assert (num_non_boundary_vertices == non_boundary_vertices_seen)
    C = sparse.kron(C, sparse.eye(3, 3)).tocsr()
    Cbar = sparse.kron(Cbar, sparse.eye(3, 3)).tocsr()

    edge_boundary_vertices = 0
    edge_non_boundary_vertices = 0
    for v0, v1 in self.edges:
      if v0 in self.boundary_vertices:
        edge_boundary_vertices += 1
      else:
        edge_non_boundary_vertices += 1
      if v1 in self.boundary_vertices:
        edge_boundary_vertices += 1
      else:
        edge_non_boundary_vertices += 1

      weight = 1.0
      L[v0,v0] -= weight
      L[v0,v1] += weight

      L[v1,v1] -= weight
      L[v1,v0] += weight
    L = sparse.kron(L, sparse.eye(3, 3)).tocsr()

    xtilde = numpy.array(self.vertices).flatten()
    y = numpy.array(reference_mesh.vertices).flatten()
    CbarLTL = Cbar * (L.T * L)
    b = CbarLTL * (y - C.T * (C * xtilde))

    xbar, info = sparse.linalg.cg(CbarLTL * Cbar.T, b)

    x = Cbar.T * xbar + C.T * (C * xtilde)

    self.vertices = x.reshape(numpy.array(self.vertices).shape)
Example #30
def sparse_discrete_diff(n):
    '''
    Sparse discrete difference matrix on a grid
    :param n: for the n by n grid
    :return: sparse discrete difference matrix
    '''
    d = sparse.diags([-1, 1], [0, 1], shape = (n-1, n))
    id = sparse.identity(n)
    return sparse.vstack([sparse.kron(d,id), sparse.kron(id,d)])
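A quick check on a hypothetical 3x3 grid: applying the operator to the ramp u = 0..8 gives the vertical differences (all 3) followed by the horizontal ones (all 1).

import numpy as np

n = 3
D = sparse_discrete_diff(n)
u = np.arange(n * n, dtype=float)  # [[0,1,2],[3,4,5],[6,7,8]] flattened row-major
print(D.shape)   # (12, 9): both difference directions stacked
print(D @ u)     # [3. 3. 3. 3. 3. 3. 1. 1. 1. 1. 1. 1.]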
Example #31
    def configure(self,
                  H_sys,
                  coup_op,
                  coup_strength,
                  temperature,
                  N_cut,
                  N_exp,
                  cut_freq,
                  planck=None,
                  boltzmann=None,
                  renorm=None,
                  bnd_cut_approx=None,
                  options=None,
                  progress_bar=None,
                  stats=None):
        """
        Calls configure from :class:`HEOMSolver` and sets any attributes
        that are specific to this subclass
        """
        start_config = timeit.default_timer()

        HEOMSolver.configure(self,
                             H_sys,
                             coup_op,
                             coup_strength,
                             temperature,
                             N_cut,
                             N_exp,
                             planck=planck,
                             boltzmann=boltzmann,
                             options=options,
                             progress_bar=progress_bar,
                             stats=stats)
        self.cut_freq = cut_freq
        if renorm is not None: self.renorm = renorm
        if bnd_cut_approx is not None: self.bnd_cut_approx = bnd_cut_approx

        # Load local values for optional parameters
        # Constants and Hamiltonian.
        hbar = self.planck
        options = self.options
        progress_bar = self.progress_bar
        stats = self.stats

        if stats:
            ss_conf = stats.sections.get('config')
            if ss_conf is None:
                ss_conf = stats.add_section('config')

        c, nu = self._calc_matsubara_params()

        if renorm:
            norm_plus, norm_minus = self._calc_renorm_factors()
            if stats:
                stats.add_message('options', 'renormalisation', ss_conf)
        # Dimensions set by the system
        sup_dim = H_sys.dims[0][0]**2
        unit_sys = qeye(H_sys.dims[0])

        # Use shorthands (mainly as in referenced PRL)
        lam0 = self.coup_strength
        gam = self.cut_freq
        N_c = self.N_cut
        N_m = self.N_exp
        Q = coup_op  # Q as shorthand for coupling operator
        beta = 1.0 / (self.boltzmann * self.temperature)

        # Ntot is the total number of ancillary elements in the hierarchy
        # Ntot = factorial(N_c + N_m) / (factorial(N_c)*factorial(N_m))
        # Turns out to be the same as nstates from state_number_enumerate
        N_he, he2idx, idx2he = enr_state_dictionaries([N_c + 1] * N_m, N_c)

        unit_helems = sp.identity(N_he, format='csr')
        if self.bnd_cut_approx:
            # the Tanimura boundary cut off operator
            if stats:
                stats.add_message('options', 'boundary cutoff approx', ss_conf)
            op = -2 * spre(Q) * spost(Q.dag()) + spre(Q.dag() * Q) + spost(
                Q.dag() * Q)

            approx_factr = ((2 * lam0 /
                             (beta * gam * hbar)) - 1j * lam0) / hbar
            for k in range(N_m):
                approx_factr -= (c[k] / nu[k])
            L_bnd = -approx_factr * op.data
            L_helems = sp.kron(unit_helems, L_bnd)
        else:
            L_helems = sp.csr_matrix((N_he * sup_dim, N_he * sup_dim),
                                     dtype=complex)

        # Build the hierarchy element interaction matrix
        if stats: start_helem_constr = timeit.default_timer()

        unit_sup = spre(unit_sys).data
        spreQ = spre(Q).data
        spostQ = spost(Q).data
        commQ = (spre(Q) - spost(Q)).data
        N_he_interact = 0

        for he_idx in range(N_he):
            he_state = list(idx2he[he_idx])
            n_excite = sum(he_state)

            # The diagonal elements for the hierarchy operator
            # coeff for diagonal elements
            sum_n_m_freq = 0.0
            for k in range(N_m):
                sum_n_m_freq += he_state[k] * nu[k]

            op = -sum_n_m_freq * unit_sup
            L_he = _pad_csr(op, N_he, N_he, he_idx, he_idx)
            L_helems += L_he

            # Add the neighbour interactions
            he_state_neigh = copy(he_state)
            for k in range(N_m):

                n_k = he_state[k]
                if n_k >= 1:
                    # find the hierarchy element index of the neighbour before
                    # this element, for this Matsubara term
                    he_state_neigh[k] = n_k - 1
                    he_idx_neigh = he2idx[tuple(he_state_neigh)]

                    op = c[k] * spreQ - np.conj(c[k]) * spostQ
                    if renorm:
                        op = -1j * norm_minus[n_k, k] * op
                    else:
                        op = -1j * n_k * op

                    L_he = _pad_csr(op, N_he, N_he, he_idx, he_idx_neigh)
                    L_helems += L_he
                    N_he_interact += 1

                    he_state_neigh[k] = n_k

                if n_excite <= N_c - 1:
                    # find the hierarchy element index of the neighbour after
                    # this element, for this Matsubara term
                    he_state_neigh[k] = n_k + 1
                    he_idx_neigh = he2idx[tuple(he_state_neigh)]

                    op = commQ
                    if renorm:
                        op = -1j * norm_plus[n_k, k] * op
                    else:
                        op = -1j * op

                    L_he = _pad_csr(op, N_he, N_he, he_idx, he_idx_neigh)
                    L_helems += L_he
                    N_he_interact += 1

                    he_state_neigh[k] = n_k

        if stats:
            stats.add_timing('hierarchy construct',
                             timeit.default_timer() - start_helem_constr,
                             ss_conf)
            stats.add_count('Num hierarchy elements', N_he, ss_conf)
            stats.add_count('Num he interactions', N_he_interact, ss_conf)

        # Setup Liouvillian
        if stats: start_liouvillian = timeit.default_timer()
        H_he = sp.kron(unit_helems, liouvillian(H_sys).data)

        L_helems += H_he

        if stats:
            stats.add_timing('Liouvillian construct',
                             timeit.default_timer() - start_liouvillian,
                             ss_conf)

        if stats: start_integ_conf = timeit.default_timer()

        r = scipy.integrate.ode(cy_ode_rhs)

        r.set_f_params(L_helems.data, L_helems.indices, L_helems.indptr)
        r.set_integrator('zvode',
                         method=options.method,
                         order=options.order,
                         atol=options.atol,
                         rtol=options.rtol,
                         nsteps=options.nsteps,
                         first_step=options.first_step,
                         min_step=options.min_step,
                         max_step=options.max_step)

        if stats:
            time_now = timeit.default_timer()
            stats.add_timing('Integrator configure',
                             time_now - start_integ_conf, ss_conf)
            if ss_conf.total_time is None:
                ss_conf.total_time = time_now - start_config
            else:
                ss_conf.total_time += time_now - start_config

        self._ode = r
        self._N_he = N_he
        self._sup_dim = sup_dim
        self._configured = True
Example #32
    def DVR_Matrix_3D(self):
        #X
        DVRMatrix3Dx = []
        FORTRANoffset = 1
        for i in range(self.cfg.NGRIDX):
            for idash in range(self.cfg.NGRIDX):
                if i == idash:
                    DVRMatrix3Dx.append([
                        FORTRANoffset + i, FORTRANoffset + idash,
                        self.cfg.HBAR * (-1.0)**np.abs(i - idash) /
                        (2.0 * self.cfg.MASS * self.cfg.h**2.0) *
                        np.pi**(2.0) / 3.0, 1.0
                    ])
                elif i != idash:
                    DVRMatrix3Dx.append([
                        FORTRANoffset + i, FORTRANoffset + idash,
                        self.cfg.HBAR * (-1.0)**np.abs(i - idash) /
                        (2.0 * self.cfg.MASS * self.cfg.h**2.0) * 2.0 /
                        (i - idash)**2.0, 1.0
                    ])
        DVRMatrix3Dx = np.array(DVRMatrix3Dx)
        rowx = DVRMatrix3Dx[:, 0] - 1
        colx = DVRMatrix3Dx[:, 1] - 1
        dataAx = DVRMatrix3Dx[:, 2]
        Ax = sp.coo_matrix((dataAx, (rowx, colx)),
                           shape=(self.cfg.NGRIDX, self.cfg.NGRIDX))
        #Y
        DVRMatrix3Dy = []
        FORTRANoffset = 1
        for i in range(self.cfg.NGRIDY):
            for idash in range(self.cfg.NGRIDY):
                if i == idash:
                    DVRMatrix3Dy.append([
                        FORTRANoffset + i, FORTRANoffset + idash,
                        self.cfg.HBAR * (-1.0)**np.abs(i - idash) /
                        (2.0 * self.cfg.MASS * self.cfg.hY**2.0) *
                        np.pi**(2.0) / 3.0, 1.0
                    ])
                elif i != idash:
                    DVRMatrix3Dy.append([
                        FORTRANoffset + i, FORTRANoffset + idash,
                        self.cfg.HBAR * (-1.0)**np.abs(i - idash) /
                        (2.0 * self.cfg.MASS * self.cfg.hY**2.0) * 2.0 /
                        (i - idash)**2.0, 1.0
                    ])
        DVRMatrix3Dy = np.array(DVRMatrix3Dy)
        rowy = DVRMatrix3Dy[:, 0] - 1
        coly = DVRMatrix3Dy[:, 1] - 1
        dataAy = DVRMatrix3Dy[:, 2]
        Ay = sp.coo_matrix((dataAy, (rowy, coly)),
                           shape=(self.cfg.NGRIDY, self.cfg.NGRIDY))

        #Z
        DVRMatrix3Dz = []
        FORTRANoffset = 1
        for i in range(self.cfg.NGRIDZ):
            for idash in range(self.cfg.NGRIDZ):
                if i == idash:
                    DVRMatrix3Dz.append([
                        FORTRANoffset + i, FORTRANoffset + idash,
                        self.cfg.HBAR * (-1.0)**np.abs(i - idash) /
                        (2.0 * self.cfg.MASS * self.cfg.hZ**2.0) *
                        np.pi**(2.0) / 3.0, 1.0
                    ])
                elif i != idash:
                    DVRMatrix3Dz.append([
                        FORTRANoffset + i, FORTRANoffset + idash,
                        self.cfg.HBAR * (-1.0)**np.abs(i - idash) /
                        (2.0 * self.cfg.MASS * self.cfg.hZ**2.0) * 2.0 /
                        (i - idash)**2.0, 1.0
                    ])
        DVRMatrix3Dz = np.array(DVRMatrix3Dz)
        rowz = DVRMatrix3Dz[:, 0] - 1
        colz = DVRMatrix3Dz[:, 1] - 1
        dataAz = DVRMatrix3Dz[:, 2]
        Az = sp.coo_matrix((dataAz, (rowz, colz)),
                           shape=(self.cfg.NGRIDZ, self.cfg.NGRIDZ))
        #XYZ
        D3 = sp.coo_matrix(
            sp.kron(sp.kron(Ax, sp.eye(self.cfg.NGRIDY)),
                    sp.eye(self.cfg.NGRIDZ)) +
            sp.kron(sp.kron(sp.eye(self.cfg.NGRIDX), Ay),
                    sp.eye(self.cfg.NGRIDZ)) +
            sp.kron(sp.kron(sp.eye(self.cfg.NGRIDX), sp.eye(self.cfg.NGRIDY)),
                    Az))
        #ADD V(x,y)
        return D3 + self.cfg.V
Example #33
# Lists to store the transformation/truncation matrices for the wave-function transformation.

UENVL = []
UENVR = []

# Construct Sz, S+ and S- for a single site

sqtwo = 1.41421356237309504880168872421

Sz = np.array([[1.0, 0, 0], [0, 0, 0], [0, 0, -1.0]], dtype='d')
Sp = np.array([[0, sqtwo, 0], [0, 0, sqtwo], [0, 0, 0]], dtype='d')
Sm = np.array([[0, 0, 0], [sqtwo, 0, 0], [0, sqtwo, 0]], dtype='d')

#Initial Hamiltonians for left and right sides; two site Hamiltonians.

HL = kron(Sz, Sz) + 0.5 * (kron(Sp, Sm) + kron(Sm, Sp))
HR = kron(Sz, Sz) + 0.5 * (kron(Sp, Sm) + kron(Sm, Sp))

# Adding HL and HR to the lists; refer to p. 1691 of notes
# (the lists are assumed empty at this point)

HRMAT = []
HLMAT = []
HRMAT.append(HR)
HLMAT.append(HL)

# Initialising matrices for growth

SztL = Sz
SptL = Sp
SmtL = Sm

SztR = Sz
SptR = Sp
Example #34
            # out-of-range indices contribute nothing; otherwise accumulate the signed entry
            if z[k] != -1 and z[k] < m:
                T[i, j, z[k]] = T[i, j, z[k]] + (-1)**k

# COMPUTE THE RANK OF THE KOSZUL FLATTENING
p = int(input("p = "))

d = int(nCr(m, p))
# S: B* \to A \otimes C
Tf = T.reshape(m, m * m)
Tf = sparse.csr_matrix(Tf)
print(Tf.shape)
#print(np.linalg.matrix_rank(Tf))

# Id_Skew \otimes S: L^p A \otimes B^* \to L^p A \otimes A \otimes C
K = sparse.kron(np.eye(d), Tf)
print(K.shape)

#Projection L^p \otimes A to L^{p+1}A
aa = a(int(m), int(p))
aa = sparse.csr_matrix(aa)
#Kronecker product with C
P = sparse.kron(aa.T, np.eye(m))
P = sparse.csr_matrix(P)
print(P.shape)
TAp = K @ P
print(TAp.shape)
Example #35
 def radius(eps):
     H_star = np.linalg.inv(np.identity(len(Hc)) - Hc2.dot(eps ** 2)).dot(Hc)
     return approx_spectral_radius(kron(H_star, W).dot(eps) - kron(Hc.dot(H_star), D).dot(eps ** 2), pyamg=pyamg) - 1
Example #36
def unbalance_longwas_fixed(data_file,
                            id,
                            tpoint,
                            trait,
                            bed_file,
                            kin_file,
                            var_com,
                            snp_lst=None,
                            tfix=None,
                            fix=None,
                            forder=3,
                            aorder=3,
                            porder=3,
                            na_method='omit',
                            prefix_outfile='unbalance_longwas_fixed'):
    """
    The longitudinal GWAS for unbalanced data, treating the SNP as a time-varied fixed effect.
    :param data_file: the data file. The first row is the variate names whose first initial position is alphabetical.
    For the class variates, the first letter must be capital; for the covariates (continuous variates), the first letter
    must be lowercase.
    :param id: A class variate name which indicates the individual id column in the data file.
    :param tpoint: A covariate names which indicates the time point column in the data file.
    :param trait: A variate name which indicates the analyzed trait column in the data file.
    :param bed_file: the prefix for the plink binary file.
    :param kin_file: the file for genomic relationship matrix. This file can be produced by
    gmat.gmatrix.agmat function using agmat(bed_file, inv=True, small_val=0.001, out_fmt='id_id_val')
    :param var_com: the estimated variance parameters by the gmat.longwas.unbalance.unbalance_varcom function.
    :param snp_lst: the snp list to test. Default is None.
    :param tfix: A class variate name for the time varied fixed effect. Default value is None. Only one time varied
    fixed effect can be included in the current version.
    :param fix: Expression for the time independent fixed effect. Default value is None. An example:
    fix = "Sex + age + Season".
    :param forder: the order of Legendre polynomials for the time varied fixed effect. The default value is 3.
    :param aorder: the order of Legendre polynomials for the additive genetic effect. The default value is 3.
    :param porder: the order of Legendre polynomials for the permanent environment effect. The default value is 3.
    :param na_method: The method to deal with missing values. The default value is 'omit'. 'omit' method will delete the
    row with missing values. 'include' method will fill the missing values with the adjacent values.
    :param prefix_outfile: the prefix for the output file. Default is 'unbalance_longwas_fixed'.
    :return: A pandas data frame for the test result.
    """
    logging.info('################################')
    logging.info('###Prepare the related matrix###')
    logging.info('################################')
    if var_com.shape[0] != aorder * (aorder + 1) / 2 + aorder + 1 + porder * (
            porder + 1) / 2 + porder + 1 + 1:
        logging.info('ERROR: Variances do not match the data, please check')
        exit()
    logging.info('***Read the data file***')
    logging.info('Data file: ' + data_file)
    data_df = pd.read_csv(data_file, sep='\s+', header=0)
    logging.info('NA method: ' + na_method)
    if na_method == 'omit':
        data_df = data_df.dropna()
    elif na_method == 'include':
        data_df = data_df.fillna(method='ffill')
        data_df = data_df.fillna(method='bfill')
    else:
        logging.info('na_method does not exist: ' + na_method)
        exit()
    col_names = data_df.columns
    logging.info('The column names of data file: ' + ' '.join(list(col_names)))
    logging.info(
        'Note: Variates beginning with a capital letter is converted into factors.'
    )
    class_vec = []
    for val in col_names:
        if not val[0].isalpha():
            logging.info(
                "The first character of columns names must be alphabet!")
            exit()
        if val[0] == val.capitalize()[0]:
            class_vec.append(val)
            data_df[val] = data_df[val].astype('str')
        else:
            try:
                data_df[val] = data_df[val].astype('float')
            except Exception as e:
                logging.info(e)
                logging.info(val + " may contain string, please check!")
                exit()
    logging.info('Individual column: ' + id)
    if id not in col_names:
        logging.info(id + ' is not in the data file, please check!')
        exit()
    if id not in class_vec:
        logging.info('The initial letter of {} should be capital'.format(id))
        exit()
    id_order = []
    id_arr = list(data_df[id])
    id_order.append(id_arr[0])
    for i in range(1, len(id_arr)):
        if id_arr[i] != id_arr[i - 1]:
            id_order.append(id_arr[i])
    id_in_data = set(data_df[id])
    if len(id_in_data) - len(id_order) != 0:
        logging.info('The data is not sorted by individual ID!')
        exit()
    logging.info('Time points column: ' + tpoint)
    if tpoint not in col_names:
        logging.info(tpoint + ' is not in the data file, please check!')
        exit()
    if tpoint in class_vec:
        logging.info(
            'The initial letter of {} should be lowercase'.format(tpoint))
        exit()
    logging.info('Trait column: ' + trait)
    if trait not in col_names:
        logging.info(trait + ' is not in the data file, please check!')
        exit()
    if trait in class_vec:
        logging.info(
            'The initial letter of {} should be lowercase'.format(trait))
        exit()
    logging.info('Code factor variables of the data file: ' +
                 ' '.join(list(class_vec)))
    code_val = {}
    code_dct = dct_2D()
    for val in class_vec:
        code_val[val] = 0
        temp = []
        for i in range(data_df.shape[0]):
            if data_df[val][i] not in code_dct[val]:
                code_val[val] += 1
                code_dct[val][data_df[val][i]] = str(code_val[val])
            temp.append(code_dct[val][data_df[val][i]])
        data_df[val] = np.array(temp)
    for val in class_vec:
        data_df[val] = data_df[val].astype('int')
    logging.info('***Build the design matrix for fixed effect***')
    logging.info('Time dependent fixed effect: ' + str(tfix))
    leg_fix = leg(data_df[tpoint], forder)
    if tfix is None:
        xmat_t = np.concatenate(leg_fix, axis=1)
        xmat_t = csr_matrix(xmat_t)
    else:
        if tfix not in class_vec:
            logging.info(tfix + ' is not the class variate')
            exit()
        row = np.array(range(data_df.shape[0]))
        col = np.array(data_df[tfix]) - 1
        val = np.array([1.0] * data_df.shape[0])
        tfix_mat = csr_matrix((val, (row, col)))
        xmat_t = []
        for i in range(len(leg_fix)):
            xmat_t.append(tfix_mat.multiply(leg_fix[i]))
        xmat_t = hstack(xmat_t)
        del row, col, val
        gc.collect()
    logging.info('Time independent fixed effect: ' + str(fix))
    xmat_nt = None
    if fix is None:
        xmat_nt = None
    else:
        try:
            fix_exp = ''
            vec = fix.split('+')
            for i in vec:
                val = i.strip()
                if val in class_vec:
                    fix_exp += 'C(' + val + ')'
                else:
                    fix_exp += val
            xmat_nt = dmatrix(fix_exp, data_df)
            logging.info('The expression for fixed effect: ' + fix_exp)
        except Exception as e:
            logging.info(str(e) + ': Check the fixed effect expression.')
            exit()
        xmat_nt = csr_matrix(xmat_nt[:, 1:])
    xmat = hstack([xmat_t, xmat_nt])
    xmat = xmat.toarray()
    max_id = max(data_df[id]) + 1
    tmin = min(data_df[tpoint])
    tmax = max(data_df[tpoint])
    leg_lst = [
    ]  # legendre polynomials for time dependent fixed SNP effects, save for each individuals
    for i in range(1, max_id):
        leg_lst.append(
            leg_mt(data_df[data_df[id] == i][tpoint], tmax, tmin, forder))
    tpoint_vec = sorted(set(data_df[tpoint]))
    leg_tpoint_mat = leg_mt(np.array(tpoint_vec), tmax, tmin, forder)
    leg_tpoint_accum = np.sum(leg_tpoint_mat, axis=0)
    logging.info('***Read the kinship matrix***')
    logging.info('Kinship file: ' + kin_file)
    with open(kin_file) as fin:
        row = []
        col = []
        kin = []
        id_in_kin = {}
        for line in fin:
            arr = line.split()
            id_in_kin[arr[0]] = 1
            id_in_kin[arr[1]] = 1
            if arr[0] not in code_dct[id]:
                logging.info(arr[0] + ' is not in the kinship inversion file!')
                exit()
            if arr[1] not in code_dct[id]:
                logging.info(arr[1] + ' is not in the kinship inversion file!')
                exit()
            row.append(int(code_dct[id][arr[0]]))
            col.append(int(code_dct[id][arr[1]]))
            kin.append(float(arr[2]))
    id_not_in_kin = list(set(code_dct[id].keys()) - set(id_in_kin.keys()))
    if len(id_not_in_kin) != 0:
        logging.info(
            'The ID: {} in the data file is not in the kinship file!'.format(
                ' '.join(id_not_in_kin)))
        exit()
    kin = csr_matrix(
        (np.array(kin), (np.array(row) - 1, np.array(col) - 1))).toarray()
    kin = np.add(kin, kin.T)
    kin[np.diag_indices_from(kin)] = 0.5 * np.diag(kin)
    del row, col
    gc.collect()
    logging.info('***Build the design matrix for random effect***')
    logging.info('Legendre order for additive effects: ' + str(aorder))
    leg_add = leg(data_df[tpoint], aorder)
    row = np.array(range(data_df.shape[0]))
    col = np.array(data_df[id]) - 1
    val = np.array([1.0] * data_df.shape[0])
    add_mat = csr_matrix((val, (row, col)),
                         shape=(data_df.shape[0], kin.shape[0]))
    zmat_add = []
    for i in range(len(leg_add)):
        zmat_add.append(add_mat.multiply(leg_add[i]))
    logging.info('Legendre order for permanent environmental effect: ' +
                 str(porder))
    leg_per = leg(data_df[tpoint], porder)
    per_mat = csr_matrix((val, (row, col)))
    zmat_per = []
    for i in range(len(leg_per)):
        zmat_per.append((per_mat.multiply(leg_per[i])))
    del row, col, val
    gc.collect()
    zmat = [zmat_add, zmat_per]
    y = data_df[trait].values.reshape(data_df.shape[0], 1)
    # kin_inv = [kin_inv, sparse.eye(max(data_df[id]), format="csr")]
    logging.info('***Prepare the merged Z matrix***')
    eff_ind = [[0, xmat.shape[1]]]  # the index for all effects [start end]
    zmat_con_lst = []  # combined random matrix
    for i in range(len(zmat)):
        temp = [eff_ind[i][-1]]
        zmat_con_lst.append(hstack(zmat[i]))
        for j in range(len(zmat[i])):
            temp.append(temp[-1] + zmat[i][j].shape[1])
        eff_ind.append(temp)
    logging.info('***Calculate the phenotypic (co)variance***')
    add_cov = var_com.loc[var_com.loc[:, 'vari'] == 1, :]
    row = np.array(add_cov['varij']) - 1
    col = np.array(add_cov['varik']) - 1
    val = add_cov['var_val']
    add_cov = csr_matrix((val, (row, col))).toarray()
    add_cov = add_cov + np.tril(add_cov, k=-1).T
    per_cov = var_com.loc[var_com.loc[:, 'vari'] == 2, :]
    row = np.array(per_cov['varij']) - 1
    col = np.array(per_cov['varik']) - 1
    val = per_cov['var_val']
    per_cov = csr_matrix((val, (row, col))).toarray()
    per_cov = per_cov + np.tril(per_cov, k=-1).T
    res_var = np.array(var_com['var_val'])[-1]
    vmat = zmat_con_lst[0].dot((zmat_con_lst[0].dot(np.kron(add_cov, kin))).T)
    one_id = sparse.eye(zmat_con_lst[1].shape[1] // per_cov.shape[0])
    vmat = vmat + zmat_con_lst[1].dot(
        (zmat_con_lst[1].dot(sparse.kron(per_cov, one_id))).T)
    vmat_diag = np.diag(vmat) + res_var
    np.fill_diagonal(vmat, vmat_diag)
    vmat = linalg.inv(vmat)
    logging.info('***Read the snp data***')
    snp_mat = read_plink(bed_file)
    if np.any(np.isnan(snp_mat)):
        logging.info('Missing genotypes are imputed with random genotypes.')
        snp_mat = impute_geno(snp_mat)
    num_id = snp_mat.shape[0]
    num_snp = snp_mat.shape[1]
    logging.info("There are {:d} individuals and {:d} SNPs.".format(
        num_id, num_snp))
    fam_df = pd.read_csv(bed_file + '.fam', sep='\s+', header=None)
    id_geno = list(np.array(fam_df.iloc[:, 1], dtype=str))
    id_order_index = []
    for i in id_order:
        id_order_index.append(id_geno.index(i))
    if snp_lst is None:
        snp_lst = range(num_snp)
    snp_lst = list(snp_lst)
    if min(snp_lst) < 0 or max(snp_lst) >= num_snp:
        logging.info('The value in the snp list should be >= {} and < {}'.format(
            0, num_snp))
        exit()
    snp_mat = snp_mat[id_order_index, :]
    snp_mat = snp_mat[:, snp_lst]
    logging.info(
        '#####################################################################'
    )
    logging.info(
        '###Start the fixed regression longitudinal GWAS for unbalance data###'
    )
    logging.info(
        '#####################################################################'
    )
    chi_df = leg_lst[0].shape[1]
    eff_vec = []
    chi_vec = []
    p_vec = []
    p_min_vec = []
    p_accum_vec = []
    for i in tqdm(range(snp_mat.shape[1])):
        snp_fix = list(map(lambda x, y: x * y, leg_lst, list(snp_mat[:, i])))
        snp_fix = np.concatenate(snp_fix, axis=0)
        snp_fix = np.concatenate((xmat, snp_fix), axis=1)
        xv = np.dot(snp_fix.T, vmat)
        xvx = np.dot(xv, snp_fix)
        xvx = np.linalg.inv(xvx)
        xvy = np.dot(xv, y)
        b = np.dot(xvx, xvy)
        eff = b[-chi_df:, -1]
        eff_var = xvx[-chi_df:, -chi_df:]
        chi_val = np.sum(np.dot(np.dot(eff.T, np.linalg.inv(eff_var)), eff))
        p_val = chi2.sf(chi_val, chi_df)
        eff_vec.append(b[-chi_df:, -1])
        chi_vec.append(chi_val)
        p_vec.append(p_val)
        p_tpoint_vec = []
        for k in range(leg_tpoint_mat.shape[0]):
            eff_tpoint = np.sum(np.dot(leg_tpoint_mat[k, :], eff))
            eff_var_tpoint = np.sum(
                np.dot(leg_tpoint_mat[k, :],
                       np.dot(eff_var, leg_tpoint_mat[k, :])))
            chi_tpoint = eff_tpoint * eff_tpoint / eff_var_tpoint
            p_tpoint = chi2.sf(chi_tpoint, 1)
            p_tpoint_vec.append(p_tpoint)
        p_min_vec.append(min(p_tpoint_vec))
        eff_accum = np.sum(np.dot(leg_tpoint_accum, eff))
        eff_var_accum = np.sum(
            np.dot(leg_tpoint_accum, np.dot(eff_var, leg_tpoint_accum)))
        chi_accum = eff_accum * eff_accum / eff_var_accum
        p_accum = chi2.sf(chi_accum, 1)
        p_accum_vec.append(p_accum)
    logging.info('Finish association analysis')
    logging.info('***Output***')
    snp_info_file = bed_file + '.bim'
    snp_info = pd.read_csv(snp_info_file, sep=r'\s+', header=None)
    res_df = snp_info.iloc[snp_lst, [0, 1, 3, 4, 5]]
    res_df.columns = ['chro', 'snp_ID', 'pos', 'allele1', 'allele2']
    res_df.loc[:, 'order'] = snp_lst
    res_df = res_df.iloc[:, [5, 0, 1, 2, 3, 4]]
    eff_vec = np.array(eff_vec)
    for i in range(eff_vec.shape[1]):
        col_ind = 'eff' + str(i)
        res_df.loc[:, col_ind] = eff_vec[:, i]
    res_df.loc[:, 'chi_val'] = chi_vec
    res_df.loc[:, 'p_val'] = p_vec
    res_df.loc[:, 'p_min'] = p_min_vec
    res_df.loc[:, 'p_accum'] = p_accum_vec
    out_file = prefix_outfile + '.res'
    res_df.to_csv(out_file, sep=' ', index=False)
    return res_df
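A minimal added sketch (toy numbers, not from the original pipeline) of the per-SNP Wald test performed inside the loop above: the chi-square statistic is eff' inv(eff_var) eff with chi_df degrees of freedom.

# Sketch of the Wald chi-square test used per SNP above (toy numbers).
import numpy as np
from scipy.stats import chi2

chi_df = 3                              # number of regression coefficients tested
eff = np.array([0.12, -0.05, 0.08])     # estimated SNP effect-curve coefficients
eff_var = np.diag([0.01, 0.02, 0.015])  # their covariance block from inv(X' V^-1 X)
chi_val = float(eff @ np.linalg.inv(eff_var) @ eff)
p_val = chi2.sf(chi_val, chi_df)        # survival function = 1 - CDF
print(chi_val, p_val)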
Example #37
def lindblad_dissipator(l):
    return kron(l, l.conj()) - 0.5 *\
                      operator_to_superoperator(dag(l).dot(l), kind='anticommutator')
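A quick numerical check (added, not from the original source) that the kron(l, l.conj()) term implements rho -> l rho l^dagger under numpy's row-major vectorization; the anticommutator part is omitted since operator_to_superoperator is defined elsewhere.

# Verify kron(L, L.conj()) @ vec(rho) == vec(L @ rho @ L^dagger) for
# row-major (numpy .reshape) vectorization.
import numpy as np

L = np.array([[0.0, 1.0], [0.0, 0.0]], dtype=complex)   # sigma_minus
rho = np.array([[0.7, 0.1j], [-0.1j, 0.3]], dtype=complex)

lhs = (np.kron(L, L.conj()) @ rho.reshape(-1)).reshape(2, 2)
rhs = L @ rho @ L.conj().T
assert np.allclose(lhs, rhs)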
Example #38
def laplop(m, n):
    ddn = sp.spdiags(
        np.ones(n) * np.array([[1, -2, 1]]).T, [-1, 0, 1], n, n)
    ddm = sp.spdiags(
        np.ones(m) * np.array([[1, -2, 1]]).T, [-1, 0, 1], m, m)
    return sp.kron(ddm, sp.eye(n, n)) + sp.kron(sp.eye(m, m), ddn)
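Usage sketch (added): laplop assembles the 5-point Laplacian of an m x n grid as a Kronecker sum; a quick shape and symmetry check, assuming scipy.sparse is imported as sp as in the snippet above.

import numpy as np
import scipy.sparse as sp

L2d = laplop(3, 4)            # Laplacian of a 3 x 4 grid, acting on a length-12 vector
assert L2d.shape == (12, 12)
assert abs(L2d - L2d.T).max() < 1e-12   # a Kronecker sum of symmetric bands is symmetric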
Example #39
def compose(a, b):
    return sparse.kron(a, b, format='bsr')
Example #40
    def BuildAKLT(self):
        lat = self.Lat

        for bond in range(0, lat.Number1neigh):
            for i in range(0, self.Nsite):
                j = lat.nn_[i, bond]
                # print(j)
                if i < j and j >= 0:
                    # Kxx1,2_ij
                    self.Kxx1Graph_[i, j] = lat.Kxx1
                    self.Kxx1Graph_[j, i] = lat.Kxx1
                    self.Kxx2Graph_[i, j] = lat.Kxx2
                    self.Kxx2Graph_[j, i] = lat.Kxx2

                    # Kyy1,2_ij
                    self.Kyy1Graph_[i, j] = lat.Kyy1
                    self.Kyy1Graph_[j, i] = lat.Kyy1
                    self.Kyy2Graph_[i, j] = lat.Kyy2
                    self.Kyy2Graph_[j, i] = lat.Kyy2

                    # Kzz1,2_ij
                    self.Kzz1Graph_[i, j] = lat.Kzz1
                    self.Kzz1Graph_[j, i] = lat.Kzz1
                    self.Kzz2Graph_[i, j] = lat.Kzz2
                    self.Kzz2Graph_[j, i] = lat.Kzz2

        print("\nKxx1Graph_:")
        matprint(self.Kxx1Graph_)
        print("\nKyy1Graph_:")
        matprint(self.Kyy1Graph_)
        print("\nKzz1Graph_:")
        matprint(self.Kzz1Graph_)
        print("\nKxx2Graph_:")
        matprint(self.Kxx2Graph_)
        print("\nKyy2Graph_:")
        matprint(self.Kyy2Graph_)
        print("\nKzz2Graph_:")
        matprint(self.Kzz2Graph_)

        self.Kxx1Pair_, self.Kxx1coef_ = PairConstructor(
            self.Kxx1Graph_, self.Nsite)
        self.Kyy1Pair_, self.Kyy1coef_ = PairConstructor(
            self.Kyy1Graph_, self.Nsite)
        self.Kzz1Pair_, self.Kzz1coef_ = PairConstructor(
            self.Kzz1Graph_, self.Nsite)

        self.Kxx2Pair_, self.Kxx2coef_ = PairConstructor(
            self.Kxx2Graph_, self.Nsite)
        self.Kyy2Pair_, self.Kyy2coef_ = PairConstructor(
            self.Kyy2Graph_, self.Nsite)
        self.Kzz2Pair_, self.Kzz2coef_ = PairConstructor(
            self.Kzz2Graph_, self.Nsite)

        # ---------------------Build Hamiltonian as Sparse Matrix-------------------

        print("[Hamiltonian.py] Building Hamiltonian as Sparse Matrix...")
        Spins = Dofs("SpinOne")
        if lat.Model == "TFIM":
            Spins = Dofs("SpinHalf")
        sx = Spins.Sx
        sy = Spins.Sy
        sz = Spins.Sz
        print("-------------------------------------------", sx.shape)

        Hamx1 = TwoSpinOps(self.Kxx1Pair_, self.Kxx1coef_, sx, sx, self.Nsite)
        Hamy1 = TwoSpinOps(self.Kyy1Pair_, self.Kyy1coef_, sy, sy, self.Nsite)
        Hamz1 = TwoSpinOps(self.Kzz1Pair_, self.Kzz1coef_, sz, sz, self.Nsite)

        Hamxxxx2 = TwoSpinOps(self.Kxx2Pair_, self.Kxx2coef_, sx * sx, sx * sx,
                              self.Nsite)
        Hamxxyy2 = TwoSpinOps(self.Kyy2Pair_, self.Kyy2coef_, sy * sy, sy * sy,
                              self.Nsite)
        Hamxxzz2 = TwoSpinOps(self.Kzz2Pair_, self.Kzz2coef_, sz * sz, sz * sz,
                              self.Nsite)

        Hamxy2 = TwoSpinOps(self.Kxx2Pair_, self.Kxx2coef_, sx * sy, sx * sy,
                            self.Nsite)
        Hamxz2 = TwoSpinOps(self.Kyy2Pair_, self.Kyy2coef_, sx * sz, sx * sz,
                            self.Nsite)

        Hamyx2 = TwoSpinOps(self.Kzz2Pair_, self.Kzz2coef_, sy * sx, sy * sx,
                            self.Nsite)
        Hamyz2 = TwoSpinOps(self.Kxx2Pair_, self.Kxx2coef_, sy * sz, sy * sz,
                            self.Nsite)

        Hamzx2 = TwoSpinOps(self.Kyy2Pair_, self.Kyy2coef_, sz * sx, sz * sx,
                            self.Nsite)
        Hamzy2 = TwoSpinOps(self.Kzz2Pair_, self.Kzz2coef_, sz * sy, sz * sy,
                            self.Nsite)

        Ham = Hamx1 + Hamy1 + Hamz1 + Hamxy2 + Hamxz2 + Hamyx2 + Hamyz2 + Hamzx2 + Hamzy2
        Ham += Hamxxxx2 + Hamxxyy2 + Hamxxzz2

        # --------------------------- Add external field -------------------------

        hilsize = sx.shape[0]
        for i in range(0, self.Nsite):
            ida = sp.eye(hilsize**i)
            idb = sp.eye(hilsize**(self.Nsite - i - 1))
            Ham += sp.kron(ida, sp.kron(sx, idb)) * self.Hx
            Ham += sp.kron(ida, sp.kron(sy, idb)) * self.Hy
            Ham += sp.kron(ida, sp.kron(sz, idb)) * self.Hz

        # if lat.Model == "AKLT":
        Ham += sp.eye(Ham.shape[0]) * 2.0 / 3.0 * len(self.Kxx1coef_)
        return Ham
Example #41
def spmatkron(matlist):
    return sps.csc_matrix(reduce(lambda A, B: sps.kron(A, B, 'csc'), matlist))
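Usage sketch (added): embedding sigma_z on the middle site of a hypothetical 3-site chain with spmatkron; assumes scipy.sparse is imported as sps and functools.reduce is available, as in the snippet above.

from functools import reduce
import numpy as np
import scipy.sparse as sps

sz = sps.csc_matrix(np.diag([1.0, -1.0]))
i2 = sps.identity(2, format='csc')
op = spmatkron([i2, sz, i2])   # I (x) sigma_z (x) I
assert op.shape == (8, 8)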
def mpc_increment(Ad_list, Bd_list, gd_list, x_tilda_vec, Xr, pred_x_tilda,
                  pred_del_u, Q, QN, R, N, xmin_tilda, xmax_tilda, del_umin,
                  del_umax):
    """
    Incremental MPC
    x_tilda_vec : [nx+nu,]
    Xr          : [nx, N+1]
    Q           : [nx, nx]
    R           : [nu, nu]
    """

    tic_mat = time.time()
    # ========== Cast MPC problem to a QP: x = (x(0),x(1),...,x(N),u(0),...,u(N-1)) ==========
    nx = Ad_list[0].shape[0]
    nu = Bd_list[0].shape[1]

    # Cast MPC problem to a QP:
    #   x = (x(0),x(1),...,x(N), u(0),...,u(N-1))

    # Objective function
    # C_tilda = [I, 0]
    # Q_tilda = C_tilda.T * Q * C_tilda : (nx+nu, nx) * (nx, nx) * (nx, nx+nu) => (nx+nu, nx+nu)
    C_tilda = sparse.hstack([sparse.eye(nx),
                             np.zeros([nx, nu])])  # (nx, nx+nu)
    Q_tilda = C_tilda.transpose() * Q * C_tilda
    Q_tilda_N = C_tilda.transpose() * QN * C_tilda

    # - quadratic objective (P)
    P = sparse.block_diag([
        sparse.kron(sparse.eye(N), Q_tilda),  # Q_tilda x N on diagonal
        Q_tilda_N,                            # terminal-cost block
        sparse.kron(sparse.eye(N), R),        # R x N on diagonal
    ]).tocsc()

    # - linear objective (q)
    Q_C_tilda = Q * C_tilda
    QN_C_tilda = QN * C_tilda

    Q_C_tilda_trans = Q_C_tilda.transpose()
    QN_C_tilda_trans = QN_C_tilda.transpose()

    q = -Q_C_tilda_trans.dot(Xr[:, 0])  # index 0
    for ii in range(N - 1):
        q = np.hstack([q,
                       -Q_C_tilda_trans.dot(Xr[:, ii + 1])])  # index 1 ~ N-1
    q = np.hstack([q, -QN_C_tilda_trans.dot(Xr[:, N]),
                   np.zeros(N * nu)])  # index N

    # Augmentation for Incremental Control
    Ad_sys = Ad_list[0]
    Bd_sys = Bd_list[0]
    Aug_A_sys = np.hstack([Ad_sys, Bd_sys])
    Aug_A_increment = sparse.hstack(
        [sparse.csr_matrix((nu, nx)),
         sparse.eye(nu)])
    Ad_tilda = sparse.vstack([Aug_A_sys, Aug_A_increment])
    Bd_tilda = sparse.vstack([Bd_sys, sparse.eye(nu)])

    Ax_Ad = sparse.csc_matrix(Ad_tilda)
    Ax_diag = sparse.kron(sparse.eye(N + 1), -sparse.eye(nx + nu))
    Bu_Bd = sparse.csc_matrix(Bd_tilda)

    for i in range(N - 1):
        Ad_sys = Ad_list[i + 1]
        Bd_sys = Bd_list[i + 1]
        Aug_A_sys = np.hstack([Ad_sys, Bd_sys])
        Aug_A_increment = sparse.hstack(
            [sparse.csr_matrix((nu, nx)),
             sparse.eye(nu)])
        Ad_tilda = sparse.vstack([Aug_A_sys, Aug_A_increment])
        Bd_tilda = sparse.vstack([Bd_sys, sparse.eye(nu)])

        Ax_Ad = sparse.block_diag([Ax_Ad, Ad_tilda])
        Bu_Bd = sparse.block_diag([Bu_Bd, Bd_tilda])

    Ax_Ad_top = sparse.kron(np.ones(N + 1), np.zeros((nx + nu, nx + nu)))
    Ax_Ad_side = sparse.kron(np.ones((N, 1)), np.zeros((nx + nu, nx + nu)))
    Ax = Ax_diag + sparse.vstack(
        [Ax_Ad_top, sparse.hstack([Ax_Ad, Ax_Ad_side])])
    Bu_Bd_top = sparse.kron(np.ones(N), np.zeros((nx + nu, nu)))
    Bu = sparse.vstack([Bu_Bd_top, Bu_Bd])
    Aeq = sparse.hstack([Ax, Bu])

    # - Equality constraint (linear dynamics) : lower bound and upper bound
    leq = -x_tilda_vec  # later ueq == leq
    for i in range(N):
        gd_tilda = np.vstack([gd_list[i], np.zeros(
            (nu, 1))])  # gd_tilda for augmented system
        gd_tilda = np.squeeze(gd_tilda, axis=1)  # from (N,1) to (N,)
        leq = np.hstack([leq, -gd_tilda])
    # leq = np.hstack([-x_tilda_vec, np.zeros(N*nx)])
    ueq = leq

    # Original Code
    # ----- input and state constraints -----
    Aineq = sparse.eye((N + 1) * (nx + nu) + N * nu)
    lineq = np.hstack(
        [np.kron(np.ones(N + 1), xmin_tilda),
         np.kron(np.ones(N), del_umin)])
    uineq = np.hstack(
        [np.kron(np.ones(N + 1), xmax_tilda),
         np.kron(np.ones(N), del_umax)])

    # ----- OSQP constraints -----
    A = sparse.vstack([Aeq, Aineq]).tocsc()
    lb = np.hstack([leq, lineq])
    ub = np.hstack([ueq, uineq])

    print("matrix time :", time.time() - tic_mat)

    # ==========Create an OSQP object and Setup workspace ==========
    tic_solve = time.time()
    prob = osqp.OSQP()
    prob.setup(P, q, A, lb, ub, verbose=False, polish=False,
               warm_start=False)  # verbose: print output.

    # Solve
    res = prob.solve()

    # Check solver status
    if res.info.status != 'solved':
        print('OSQP did not solve the problem!')
        raise ValueError('OSQP did not solve the problem!')

    print("solver time :", time.time() - tic_solve)

    tic_pred = time.time()

    # Predictive States and Actions
    sol_state = res.x[:-N * nu]
    sol_action = res.x[-N * nu:]

    for ii in range((N + 1) * (nx + nu)):
        if ii % (nx + nu) == 0:
            pred_x_tilda[0, ii // (nx + nu)] = sol_state[ii]  # X
        elif ii % (nx + nu) == 1:
            pred_x_tilda[1, ii // (nx + nu)] = sol_state[ii]  # Y
        elif ii % (nx + nu) == 2:
            pred_x_tilda[2, ii // (nx + nu)] = sol_state[ii]  # Vx
        elif ii % (nx + nu) == 3:
            pred_x_tilda[3, ii // (nx + nu)] = sol_state[ii]  # Yaw
        elif ii % (nx + nu) == 4:
            pred_x_tilda[4, ii // (nx + nu)] = sol_state[ii]  # Steer
        else:  # ii % (nx+nu) == 5:
            pred_x_tilda[5, ii // (nx + nu)] = sol_state[ii]  # accel_track

    for jj in range((N) * nu):
        if jj % nu == 0:
            pred_del_u[0, jj // nu] = sol_action[jj]
        else:  # jj % nu == 1
            pred_del_u[1, jj // nu] = sol_action[jj]
    pred_del_u[:, -1] = pred_del_u[:, -2]  # append last control

    print("Parsing pred state action time :", time.time() - tic_pred)

    return pred_x_tilda, pred_del_u
Example #43
if bilayerfull:
  sq2basTB, bas2sqTB = get_bilayer_bas(sq2bas, sq2bas)
  #dd = [0]
  EigeBLt = np.zeros((len(dd),eigv_k), dtype=float)
  fig = plt.figure(1)
  for ii in range(len(dd)):
    print('# ii = ', ii)
    stime = time.time()
    d = dd[ii]
    VjjtIntL = FQHE_2DEG_Int_Interlayer(m, asp, d)
    #print VjjtIntL
    #break
    row1, col1, dat1 = get_FQHE_Interlayer_MatEle(bas2sqTB, sq2basTB, bas2sq, sq2bas, bas2sq, sq2bas, VjjtIntL)

    #HamBL = sps.kron(Ham, Ham, format='coo') #+ 0.0*sp.sparse.coo_matrix((dat1,(row1, col1)), shape=(numbas**2, numbas**2))
    HamBL = sps.kron(np.eye(numbas), Hamff, format='coo')
    HamBL += sps.kron(Hamff, np.eye(numbas), format='coo') + sp.sparse.coo_matrix((dat1,(row1, col1)), shape=(numbas**2, numbas**2))
  
    EigeBL, EigfBL = eigsh(HamBL, k=eigv_k, which=mode1)
    print(sorted(EigeBL))
    EigeBLt[ii,:] = np.real(sorted(EigeBL))-np.amin(EigeBL)
  
    etime = time.time()
    print('get basis time =', etime - stime)
  
    #plt.clf()
    plt.plot(dd*eigv_k, EigeBLt[:,0:5],'o')
    plt.savefig('EigeBLt_third_'+str(m)+'_t.eps',format='eps')
  
np.savetxt('EigeBLt_third_'+str(m)+'_t.dat', EigeBLt)
plt.show()
Example #44
def tensor(*args):
    """Calculates the tensor product of input operators. 
    
    Parameters
    ----------
    args : array_like 
        ``list`` or ``array`` of quantum objects for tensor product.
        
    Returns
    --------
    obj : qobj
        A composite quantum object.
    
    Examples
    --------    
    >>> tensor([sigmax(), sigmax()])
    Quantum object: dims = [[2, 2], [2, 2]], shape = [4, 4], type = oper, isHerm = True
    Qobj data = 
    [[ 0.+0.j  0.+0.j  0.+0.j  1.+0.j]
     [ 0.+0.j  0.+0.j  1.+0.j  0.+0.j]
     [ 0.+0.j  1.+0.j  0.+0.j  0.+0.j]
     [ 1.+0.j  0.+0.j  0.+0.j  0.+0.j]]

    """
    if not args:
        raise TypeError("Requires at least one input argument")
    num_args = len(args)
    step = 0
    for n in range(num_args):
        if isinstance(args[n], Qobj):
            qos = args[n]
            if step == 0:
                dat = qos.data
                dim = qos.dims
                shp = qos.shape
                step = 1
            else:
                dat = sp.kron(dat, qos.data,
                              format='csr')  #sparse Kronecker product
                dim = [dim[0] + qos.dims[0],
                       dim[1] + qos.dims[1]]  #append dimensions of Qobjs
                shp = [dat.shape[0], dat.shape[1]]  #new shape of matrix

        elif isinstance(
                args[n],
            (list, ndarray)):  #checks if input is list/array of Qobjs
            qos = args[n]
            items = len(qos)  #number of inputs
            if not all([
                    isinstance(k, Qobj) for k in qos
            ]):  #raise error if one of the inputs is not a quantum object
                raise TypeError("One of inputs is not a quantum object")
            if items == 1:  # if only one Qobj, do nothing
                if step == 0:
                    dat = qos[0].data
                    dim = qos[0].dims
                    shp = qos[0].shape
                    step = 1
                else:
                    dat = sp.kron(dat, qos[0].data,
                                  format='csr')  #sparse Kronecker product
                    dim = [dim[0] + qos[0].dims[0], dim[1] + qos[0].dims[1]
                           ]  #append dimensions of Qobjs
                    shp = [dat.shape[0], dat.shape[1]]  #new shape of matrix
            elif items != 1:
                if step == 0:
                    dat = qos[0].data
                    dim = qos[0].dims
                    shp = qos[0].shape
                    step = 1
                for k in range(items - 1):  #cycle over all items
                    dat = sp.kron(dat, qos[k + 1].data,
                                  format='csr')  #sparse Kronecker product
                    dim = [
                        dim[0] + qos[k + 1].dims[0],
                        dim[1] + qos[k + 1].dims[1]
                    ]  #append dimensions of Qobjs
                    shp = [dat.shape[0], dat.shape[1]]  #new shape of matrix
    out = Qobj()
    out.data = dat
    out.dims = dim
    out.shape = shp
    if qutip.settings.auto_tidyup:
        return Qobj(out).tidyup()  #returns tidy Qobj
    else:
        return Qobj(out)
Example #45
    def to_linblad(self, gamma=1.):
        l = self.data
        return gamma * (kron(l, l.conj()) -
                        operator_to_superoperator(dag(l).dot(l), type='anticommutator'))
Example #46
def unbalance_emai(y,
                   xmat,
                   zmat,
                   kin,
                   init=None,
                   maxiter=30,
                   cc_par=1.0e-8,
                   cc_gra=1.0e-6,
                   em_weight_step=0.001):
    num_var_pos = 1
    for i in range(len(zmat)):
        num_var_pos += len(zmat[i])
    y_var = np.var(y) / num_var_pos
    num_record = y.shape[0]
    var_com = []
    eff_ind = [[0, xmat.shape[1]]]  # the index for all effects [start end]
    zmat_con_lst = []  # combined random matrix
    cov_dim = []  # the dim for covariance matrix
    vari = []
    varij = []
    varik = []
    i = 0
    for i in range(len(zmat)):
        temp = [eff_ind[i][-1]]
        zmat_con_lst.append(hstack(zmat[i]))
        cov_dim.append(len(zmat[i]))
        for j in range(len(zmat[i])):
            temp.append(temp[-1] + zmat[i][j].shape[1])
            for k in range(j + 1):
                vari.append(i + 1)
                varij.append(j + 1)
                varik.append(k + 1)
                if j == k:
                    var_com.append(y_var)
                else:
                    var_com.append(0.0)
        eff_ind.append(temp)
    var_com.append(y_var)
    vari.append(i + 2)
    varij.append(1)
    varik.append(1)
    if init is None:
        var_com = np.array(var_com)
    else:
        if len(var_com) != len(init):
            logging.info('ERROR: The length of initial variances should be ' +
                         str(len(var_com)))
            exit()
        else:
            var_com = np.array(init)
    var_com_update = np.array(var_com)
    logging.info('***prepare the MME**')
    zmat_con = hstack(zmat_con_lst)  # design matrix for random effects
    wmat = hstack([xmat, zmat_con])  # merged design matrix
    cmat_pure = np.dot(wmat.T, wmat)  # C matrix
    rhs_pure = wmat.T.dot(y)  # right hand
    # em weight vector
    if em_weight_step <= 0.0 or em_weight_step > 1.0:
        logging.info(
            'ERROR: The em weight step should be between 0 (not include) and 1 (include)'
        )
        exit()
    iter_count = 0
    cc_par_val = 1000.0
    cc_gra_val = 1000.0
    delta = 1000.0
    logging.info("initial variances: " +
                 ' '.join(np.array(var_com, dtype=str)))
    covi_mat = pre_covi_mat(cov_dim, var_com)
    if covi_mat is None:
        logging.info(
            "ERROR: Initial variances are not positive definite, please check!")
        exit()
    while iter_count < maxiter:
        iter_count += 1
        logging.info('***Start the iteration: ' + str(iter_count) + ' ***')
        logging.info("Prepare the coefficient matrix")
        cmat = (cmat_pure.multiply(1.0 / var_com[-1])).toarray()
        for i in range(len(cov_dim)):
            if isspmatrix(kin[i]):
                temp = sparse.kron(covi_mat[i], kin[i])
                temp = temp.toarray()
            else:
                temp = linalg.kron(covi_mat[i], kin[i])
            cmat[eff_ind[i + 1][0]:eff_ind[i + 1][-1], \
            eff_ind[i + 1][0]:eff_ind[i + 1][-1]] = \
                np.add(cmat[eff_ind[i + 1][0]:eff_ind[i + 1][-1], \
                       eff_ind[i + 1][0]:eff_ind[i + 1][-1]], temp)
        rhs_mat = np.divide(rhs_pure, var_com[-1])
        cmati = linalg.inv(cmat)
        eff = np.dot(cmati, rhs_mat)
        e = y - xmat.dot(eff[:eff_ind[0][1], :]) - zmat_con.dot(
            eff[eff_ind[0][1]:, :])
        # first-order derivative
        fd_mat = pre_fd_mat_x(cmati, kin, covi_mat, eff, eff_ind, e, cov_dim,
                              zmat_con_lst, wmat, num_record, var_com)
        # AI matrix
        ai_mat = pre_ai_mat(cmati, covi_mat, eff, eff_ind, e, cov_dim,
                            zmat_con_lst, wmat, var_com)
        # EM matrix
        em_mat = pre_em_mat(cov_dim, zmat_con_lst, num_record, var_com)
        # Increase em weight to guarantee variances positive
        gamma = -em_weight_step
        while gamma < 1.0:
            gamma = gamma + em_weight_step
            if gamma >= 1.0:
                gamma = 1.0
            wemai_mat = (1 - gamma) * ai_mat + gamma * em_mat
            delta = np.dot(linalg.inv(wemai_mat), fd_mat)
            var_com_update = var_com + delta
            covi_mat = pre_covi_mat(cov_dim, var_com_update)
            if covi_mat is not None:
                logging.info('EM weight value: ' + str(gamma))
                break
        logging.info('Updated variances: ' +
                     ' '.join(np.array(var_com_update, dtype=str)))
        if covi_mat is None:
            logging.info("ERROR: Updated variances is not positive define!")
            exit()
        # Convergence criteria
        cc_par_val = np.sum(pow(delta, 2)) / np.sum(pow(var_com_update, 2))
        cc_par_val = np.sqrt(cc_par_val)
        cc_gra_val = np.sqrt(np.sum(pow(fd_mat, 2))) / len(var_com)
        var_com = var_com_update.copy()
        logging.info("Change in parameters, Norm of gradient vector: " +
                     str(cc_par_val) + ', ' + str(cc_gra_val))
        if cc_par_val < cc_par and cc_gra_val < cc_gra:
            break
    if cc_par_val < cc_par and cc_gra_val < cc_gra:
        logging.info("Variances Converged")
    else:
        logging.info("Variances not Converged")
    var_pd = {'vari': vari, "varij": varij, "varik": varik, "var_val": var_com}
    var_pd = pd.DataFrame(var_pd,
                          columns=['vari', "varij", "varik", "var_val"])
    return var_pd
Example #47
def left(a):
    if issparse(a):
        idm = identity(a.toarray().shape[-1])
    else:
        idm = identity(a.shape[-1])
    return kron(a, idm)
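Added check: with numpy's row-major flattening, left(a) acts as left multiplication on the vectorized matrix, i.e. vec(a @ X) == kron(a, I) @ vec(X).

import numpy as np

a = np.array([[1.0, 2.0], [3.0, 4.0]])
X = np.array([[0.5, -1.0], [2.0, 0.0]])
lhs = np.kron(a, np.identity(2)) @ X.reshape(-1)
assert np.allclose(lhs.reshape(2, 2), a @ X)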
Example #48
    def configure_io(self):
        """
        I/O creation is delayed until configure so that we can determine the shape and units for
        the states.
        """
        time_units = self.options['time_units']

        num_disc_nodes = self.options['grid_data'].subset_num_nodes['state_disc']
        num_col_nodes = self.options['grid_data'].subset_num_nodes['col']

        state_options = self.options['state_options']

        transcription = self.options['transcription']

        self.add_input(name='dt_dstau', shape=(num_col_nodes,), units=time_units,
                       desc='For each node, the duration of its '
                            'segment in the integration variable')

        self.xd_str = {}
        self.fd_str = {}
        self.xc_str = {}
        self.xdotc_str = {}

        for state_name, options in state_options.items():
            shape = options['shape']
            units = options['units']

            rate_units = get_rate_units(units, time_units)

            self.add_input(
                name='state_disc:{0}'.format(state_name),
                shape=(num_disc_nodes,) + shape,
                desc='Values of state {0} at discretization nodes'.format(state_name),
                units=units)

            if transcription == 'gauss-lobatto':
                self.add_input(
                    name='staterate_disc:{0}'.format(state_name),
                    shape=(num_disc_nodes,) + shape,
                    units=rate_units,
                    desc='EOM time derivative of state {0} at '
                         'discretization nodes'.format(state_name))

                self.add_output(
                    name='state_col:{0}'.format(state_name),
                    shape=(num_col_nodes,) + shape, units=units,
                    desc='Interpolated values of state {0} at '
                         'collocation nodes'.format(state_name))

            self.add_output(
                name='staterate_col:{0}'.format(state_name),
                shape=(num_col_nodes,) + shape,
                units=rate_units,
                desc='Interpolated rate of state {0} at collocation nodes'.format(state_name))

            self.xd_str[state_name] = 'state_disc:{0}'.format(state_name)
            self.fd_str[state_name] = 'staterate_disc:{0}'.format(state_name)
            self.xc_str[state_name] = 'state_col:{0}'.format(state_name)
            self.xdotc_str[state_name] = 'staterate_col:{0}'.format(state_name)

        if transcription == 'gauss-lobatto':
            Ai, Bi, Ad, Bd = self.options['grid_data'].phase_hermite_matrices('state_disc', 'col', sparse=True)

        elif transcription == 'radau-ps':
            Ai, Ad = self.options['grid_data'].phase_lagrange_matrices('state_disc', 'col', sparse=True)
            Bi = Bd = sp.csr_matrix(np.zeros(shape=(num_col_nodes, num_disc_nodes)))

        else:
            raise ValueError('unhandled transcription type: '
                             '{0}'.format(self.options['transcription']))

        self.matrices = {'Ai': Ai, 'Bi': Bi, 'Ad': Ad, 'Bd': Bd}
        self.jacs = {'Ai': {}, 'Bi': {}, 'Ad': {}, 'Bd': {}}
        self.sizes = {}
        # self.num_col_nodes = num_col_nodes
        # self.num_disc_nodes = num_disc_nodes

        for name, options in state_options.items():
            shape = options['shape']

            size = np.prod(shape)
            self.sizes[name] = size

            for key in self.jacs:
                # Each jacobian matrix has a form that is defined by the Kronecker product
                # of the interpolation matrix and np.eye(size). Make sure to specify csc format
                # here to avoid spurious zeros.
                self.jacs[key][name] = sp.kron(sp.csr_matrix(self.matrices[key]),
                                               sp.eye(size),
                                               format='csc')

            self.sizes[name] = size

            #
            # Partial of xdotc wrt dt_dstau
            #
            rs = np.arange(num_col_nodes * size, dtype=int)
            cs = np.repeat(np.arange(num_col_nodes, dtype=int), size)

            self.declare_partials(of=self.xdotc_str[name], wrt='dt_dstau',
                                  rows=rs, cols=cs)

            if transcription == 'gauss-lobatto':
                self.declare_partials(
                    of=self.xc_str[name], wrt='dt_dstau',
                    rows=rs, cols=cs)

                Ai_rows, Ai_cols, data = sp.find(self.jacs['Ai'][name])
                self.declare_partials(of=self.xc_str[name], wrt=self.xd_str[name],
                                      rows=Ai_rows, cols=Ai_cols, val=data)

                Bi_rows, Bi_cols, _ = sp.find(self.jacs['Bi'][name])
                self.declare_partials(of=self.xc_str[name], wrt=self.fd_str[name],
                                      rows=Bi_rows, cols=Bi_cols)

                Bd_rows, Bd_cols, data = sp.find(self.jacs['Bd'][name])
                self.declare_partials(of=self.xdotc_str[name], wrt=self.fd_str[name],
                                      rows=Bd_rows, cols=Bd_cols, val=data)

            Ad_rows, Ad_cols, _ = sp.find(self.jacs['Ad'][name])
            self.declare_partials(of=self.xdotc_str[name], wrt=self.xd_str[name],
                                  rows=Ad_rows, cols=Ad_cols)
Example #49
def radius(eps):
    return approx_spectral_radius(kron(Hc, W).dot(eps) - kron(Hc2, D).dot(eps ** 2), pyamg=pyamg) - 1
Example #50
def PhaseArray(i, k, theta):
    if i == 0:
        if k == 1:
            return phase(theta)
        return ss.kron(PhaseArray(i, k - 1, theta), identity, format='csr')
    return ss.kron(identity, PhaseArray(i - 1, k - 1, theta), format='csr')
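Usage sketch under stated assumptions: identity is taken to be the 2 x 2 sparse identity and phase(theta) = diag(1, exp(i*theta)); PhaseArray(i, k, theta) then places the phase gate on qubit i of a k-qubit register, with scipy.sparse imported as ss as above.

import numpy as np
import scipy.sparse as ss

identity = ss.identity(2, format='csr')        # assumed single-qubit identity
def phase(theta):                              # assumed single-qubit phase gate
    return ss.csr_matrix(np.diag([1.0, np.exp(1j * theta)]))

U = PhaseArray(1, 3, np.pi / 2)                # phase gate on qubit 1 of 3
assert U.shape == (8, 8)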
Example #51
def random_model(shape, seed=None, anisotropy=None, its=100, bounds=None):
    """
    Create a random model by convolving a kernel with a
    uniformly distributed model.

    Parameters
    ----------
    shape: tuple
        shape of the model.
    seed: int
        pick which model to produce, prints the seed if you don't choose.
    anisotropy: numpy.ndarray
        this is the (3 x n) blurring kernel that is used.
    its: int
        number of smoothing iterations
    bounds: list
        bounds on the model, len(list) == 2

    Returns
    -------
    numpy.ndarray
        M, the model

    Examples
    --------

    .. plot::
        :include-source:

        import matplotlib.pyplot as plt
        import discretize
        plt.colorbar(plt.imshow(discretize.utils.random_model((50, 50), bounds=[-4, 0])))
        plt.title('A very cool, yet completely random model.')
        plt.show()


    """
    if bounds is None:
        bounds = [0, 1]

    if seed is None:
        seed = np.random.randint(1e3)
        print('Using a seed of: ', seed)

    if type(shape) in num_types:
        shape = (shape, ) # make it a tuple for consistency

    np.random.seed(seed)
    mr = np.random.rand(*shape)
    if anisotropy is None:
        if len(shape) == 1:
            smth = np.array([1, 10., 1], dtype=float)
        elif len(shape) == 2:
            smth = np.array([[1, 7, 1], [2, 10, 2], [1, 7, 1]], dtype=float)
        elif len(shape) == 3:
            kernal = np.array([1, 4, 1], dtype=float).reshape((1, 3))
            smth = np.array(sp.kron(sp.kron(kernal, kernal.T).todense()[:], kernal).todense()).reshape((3, 3, 3))
    else:
        assert len(anisotropy.shape) == len(shape), 'Anisotropy must be the same shape.'
        smth = np.array(anisotropy, dtype=float)

    smth = smth/smth.sum() # normalize
    mi = mr
    for i in range(its):
        mi = ndi.convolve(mi, smth)

    # scale the model to live between the bounds.
    mi = (mi - mi.min())/(mi.max()-mi.min()) # scaled between 0 and 1
    mi = mi*(bounds[1]-bounds[0])+bounds[0]

    return mi
Example #52
# Number of iterations
iter_num = 21

# Coordinates of the center of the domain
a = w / 2

# Half-length of the rectangle, which stays constant
Longueur = 3 * a / 4

numpy.save(f"donnees_init.npy", np.array([w, iter_num, Longueur]))

## Build the incidence matrix
x = eye(w - 1, w, 1) - eye(w - 1, w)
y = eye(w - 1, w, 1) - eye(w - 1, w)
B = vstack([kron(eye(w), x), kron(y, eye(w))])
B = torch.from_numpy(B.toarray())

## Rectangle construction step

# Build the initial array
init_table = a * torch.ones((w, w))
y = w // 2
for i in range(w):
    for j in range(w):
        if abs(j - y) < Longueur:
            init_table[i, j] = abs(i - y)

# Activation function
sigma = lambda x: 1 / (1 + torch.exp(-10 * x))
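A small added check of the incidence construction above (toy w): the top block of B returns first differences within each row of the flattened w x w array, the bottom block first differences within each column.

import numpy as np
import torch
from scipy.sparse import eye, kron, vstack

w = 4
x = eye(w - 1, w, 1) - eye(w - 1, w)
y = eye(w - 1, w, 1) - eye(w - 1, w)
B = torch.from_numpy(vstack([kron(eye(w), x), kron(y, eye(w))]).toarray())

T = torch.arange(w * w, dtype=torch.float64).reshape(w, w)
d = B @ T.reshape(-1)
dx = d[: w * (w - 1)].reshape(w, w - 1)    # differences along each row
dy = d[w * (w - 1):].reshape(w - 1, w)     # differences along each column
assert torch.allclose(dx, T[:, 1:] - T[:, :-1])
assert torch.allclose(dy, T[1:, :] - T[:-1, :])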
Example #53
R = 0.1 * sparse.eye(1)

# Initial and reference states
x0 = np.array([0.1, 0.2])  # initial state
# Reference input and states
pref = 7.0
vref = 0
xref = np.array([pref, vref])  # reference state

# Prediction horizon
Np = 20

# Cast MPC problem to a QP: x = (x(0),x(1),...,x(N),u(0),...,u(N-1))
# - quadratic objective
P = sparse.block_diag(
    [sparse.kron(sparse.eye(Np), Q), QN,
     sparse.kron(sparse.eye(Np), R)]).tocsc()
# - linear objective
q = np.hstack(
    [np.kron(np.ones(Np), -Q.dot(xref)), -QN.dot(xref),
     np.zeros(Np * nu)])

# - linear dynamics
Ax = sparse.kron(sparse.eye(Np + 1), -sparse.eye(nx)) + sparse.kron(
    sparse.eye(Np + 1, k=-1), Ad)
Bu = sparse.kron(sparse.vstack([sparse.csc_matrix((1, Np)),
                                sparse.eye(Np)]), Bd)
Aeq = sparse.hstack([Ax, Bu])
leq = np.hstack([-x0, np.zeros(Np * nx)])
ueq = leq  # for equality constraints -> upper bound  = lower bound!
# - input and state constraints
Example #54
def HadmardArray(i, k):
    if (i == 0):
        if k == 1:
            return hadmard
        return ss.kron(HadmardArray(i, k - 1), identity, format='csr')
    return ss.kron(identity, HadmardArray(i - 1, k - 1), format='csr')
Example #55
            if 0 <= i-j < m:
                T[i,j] = b[i-j]
    return T

# print(toeplitz([1,2,3], 10))

N = im_array.shape[0]

def gaussian1d(k_len = 5, sigma = 3):
    return gaussian_kernel(k_len, sigma)[k_len//2,:]

curr_1d_kernel = gaussian1d(nitems, sigma)
# Gaussian 1D kernel as matrix
T = toeplitz(curr_1d_kernel, N)

row_mat = sparse.kron(sparse.eye(N), T)
col_mat = sparse.kron(T, sparse.eye(N+nitems-1))
G = col_mat.dot(row_mat)

flat_blurry_image = blurry_image.flatten()


def lst_sq(x, A=G, b=flat_blurry_image):
    return linalg.norm(b - A.dot(x))**2


def lst_sq_grad(x, A=G, b=flat_blurry_image):
    return 2*A.T.dot(A.dot(x) - b)


# optim_output = optimize.minimize(lst_sq, np.zeros(N**2), method='L-BFGS-B', jac=lst_sq_grad, options={'disp':True})
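A hedged alternative (added) to the commented-out optimize.minimize call: since the objective is an ordinary linear least-squares problem, scipy's sparse LSQR solver can be applied to G directly; assumes G, flat_blurry_image and N are the objects built above.

from scipy.sparse.linalg import lsqr

# Solve min_x ||G x - b||^2 with the sparse LSQR solver.
x_hat = lsqr(G, flat_blurry_image, atol=1e-8, btol=1e-8)[0]
deblurred = x_hat.reshape(N, N)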
Example #56
def scnnmat(dims, pot=0., hop=1., bcond='p', format='lil'):
    """Return Hamiltonian matrix of a simple dim-dimensional tight-binding
    system with dimensions "dims" (tuple of positive integers of length dim),
    site potentials "pot" (float or 1D-array of floats with length size =
    prod(dims)), constant isotropic next-neighbor hopping "hop" (float) and
    boundary conditions "bcond" (string consisting of characters "s", "p" and
    "a").  Return matrix in LIL-sparse format.

    bcond may be at most of length dim. In this way, each dimension can have
    its own boundary condition."""
    # 2011-02-28

    # check arguments
    assert format \
        in ['lil', 'dok', 'csr', 'csc', 'dia', 'coo', 'bsr', 'dense'], \
        'unknown format. Must be one of lil, dok, coo, dia, csr, csc, bsr ' + \
        'or dense'
    if isiterable(dims):
        dims = tuple(dims)
    else:
        dims = (dims,)
    #dims = dims[::-1]
    size = np.prod(dims, dtype=int)
    dim = len(dims)
    assert dim > 0, 'bad dimensions tuple: Must have at least one element'

    # Check boundary condition string
    assert len(bcond) <= dim, \
        'bad boundary condition: %s. Number of given boundary conditions ' + \
        'is greater than dimensionality of the system (%i)' % (bcond, dim)
    assert len(bcond) > 0, \
        'bad boundary condition: May not be empty string. At least one ' + \
        'character (s, p or a) has to be given'
    if len(bcond) < dim:
        bcond += bcond[-1]*(dim-len(bcond))

    # Method:
    # 1. Build matrix of a system with dimensions dims[:-1] (recursive call of
    #    the function), set the dims[-1] blocks with it using kronecker product
    #    (scipy.sparse.kron)
    # 2. Add the matrix elements of the remaining 1D-subsystem using setdiag
    # That's it!

    # Step 1
    subdims = dims[:-1]
    subsize = np.prod(subdims, dtype=int)
    if subsize > 1:
        # calculate submatrix recursively
        mat = spa.kron(spa.eye(dims[-1], dims[-1]),
                       scnnmat(subdims, hop=hop, bcond=bcond[:-1]),
                       format='lil')
    else:
        # initialize matrix
        mat = spa.lil_matrix((size, size))

    # Step 2
    # If periodic or anti-periodic boundary conditions are given, set
    # respective elements as well. Do this first, because they may be
    # overwritten by the direct hoppings in the next step.
    if bcond[-1] in ['p', 'a']:
        if bcond[-1] == 'p':
            bcondhoparray = np.ones((subsize))*(-hop)
        else:
            bcondhoparray = np.ones((subsize))*hop
        mat.setdiag(bcondhoparray, size-subsize)
        mat.setdiag(bcondhoparray, subsize-size)
    elif bcond[-1] == 's':
        # do not set anything
        pass
    else:
        raise ValueError(('bad boundary condition: %s. Expecting either "s" '
                          '(static), "p" (periodic) or "a" (antiperiodic)')
                         % bcond[-1])

    # set off-diagonals with hopping
    hoparray = np.ones((size-subsize))*(-hop)
    mat.setdiag(hoparray, subsize)
    mat.setdiag(hoparray, -subsize)

    # set diagonal elements with potential
    if isiterable(pot):
        pot = np.array(pot)
        assert len(pot.shape) == 1, \
            'potentials have wrong shape. Must be 1D-array-like'
        assert len(pot) == size, \
            'wrong number of potentials: Expecting %i, but got %i' \
            % (size, len(pot))
        mat.setdiag(pot)
    else:
        if pot != 0.:  # this step can be omitted if potentials are zero anyway
            mat.setdiag(np.ones((size))*pot)

    # convert matrix if needed
    if format != 'lil':
        mat = getattr(mat, 'to'+format)()

    # return matrix
    return mat
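Usage sketch (added): a 4 x 3 periodic tight-binding lattice; the returned Hamiltonian should be real symmetric. Assumes the helpers scnnmat relies on (isiterable, spa, np) are in scope as above.

H = scnnmat((4, 3), pot=0.5, hop=1.0, bcond='p', format='csr')
assert H.shape == (12, 12)
assert abs(H - H.T).max() < 1e-12   # Hermitian (real symmetric) check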
Example #57
def ODW(Wo, Wd, transform='r', silence_warnings=True):
    """
    Constructs an o*d by o*d origin-destination style spatial weight for o*d
    flows using standard spatial weights on o origins and d destinations. Input
    spatial weights must be binary or able to be suitably transformed to binary.

    Parameters
    ----------
    Wo          : W object for origin locations
                  o x o spatial weight object amongst o origins

    Wd          : W object for destination locations
                  d x d spatial weight object amongst d destinations

    transform   : Transformation for standardization of final OD spatial weight; default
                  is 'r' for row standardized
    Returns
    -------
    W           : spatial contiguity W object for associations between flows
                 o*d x o*d spatial weight object amongst o*d flows between o
                 origins and d destinations
    
    Examples
    --------

    >>> import libpysal.api as ps
    >>> O = ps.lat2W(2,2)
    >>> D = ps.lat2W(2,2)
    >>> OD = ps.ODW(O,D)
    >>> OD.weights[0]
    [0.25, 0.25, 0.25, 0.25]
    >>> OD.neighbors[0]
    array([ 5,  6,  9, 10], dtype=int32)
    >>> OD.full()[0][0]
    array([0.  , 0.  , 0.  , 0.  , 0.  , 0.25, 0.25, 0.  , 0.  , 0.25, 0.25,
           0.  , 0.  , 0.  , 0.  , 0.  ])

    """
    if Wo.transform != 'b':
        try:
            Wo.transform = 'b'
        except:
            raise AttributeError(
                'Wo is not binary and cannot be transformed to '
                'binary. Wo must be binary or suitably transformed to binary.')
    if Wd.transform != 'b':
        try:
            Wd.transform = 'b'
        except:
            raise AttributeError(
                'Wd is not binary and cannot be transformed to '
                'binary. Wd must be binary or suitably transformed to binary.')
    Wo = Wo.sparse
    Wo.eliminate_zeros()
    Wd = Wd.sparse
    Wd.eliminate_zeros()
    Ww = kron(Wo, Wd, format='csr')
    Ww.eliminate_zeros()
    Ww = WSP(Ww).to_W(silence_warnings=silence_warnings)
    Ww.transform = transform
    return Ww
Example #58
def geodesic(nTime,
             nameFileD,
             mub0,
             mub1,
             cCongestion,
             eps,
             Nit,
             detailStudy=False,
             verbose=False,
             tol=1e-6):
    """Implementation of the algorithm for computing the geodesic in the Wasserstein space.

    Arguments.
      nTime: Number of discretization points in time
      nameFileD: Name of the .off file where the triangle mesh is stored
      mub0: initial probability distribution
      mub1: final probability distribution
      cCongestion: constant for the intensity of the regularization
        (alpha in the article)
      eps: regularization parameter for the linear system inversion
      Nit: Number of iterations
      detailStudy: True if the value of the objective functional (i.e. the
        Lagrangian) and the residuals are computed at every time step (slow),
        false if computed every 10 iterations (fast)

    Output.
      phi,mu,A,E,B: values of the relevant quantities in the interpolation,
        cf. the article for more details (Note: E is the momentum, denoted by m
        in the article) objectiveValue: evolution (in term of the number of
        iterations of the ADMM) of the objective value, ie the Lagrangian
      primalResidual,dualResidual: evolution (in term of the number of
        iterations of the ADMM) of primal and dual residuals, cf the article for
        details of their computation.
    """

    startImport = time.time()

    # Boolean value saying whether there is congestion or not
    isCongestion = cCongestion >= 10**(-10)

    if verbose:
        print(15 * "-" + " Parameters for the computation of the geodesic " +
              15 * "-")
        print("Number of discretization points in time: {}".format(nTime))
        print("Name of the mesh file: {}".format(nameFileD))
        if isCongestion:
            print("Congestion parameter: {}\n".format(cCongestion))
        else:
            print("No regularization\n")

    # Time domain: staggered grid
    xTimeS = np.linspace(0, 1, nTime + 1)
    # Time step
    DeltaTime = xTimeS[1] - xTimeS[0]
    # Time domain: centered grid
    xTimeC = np.linspace(DeltaTime / 2, 1 - DeltaTime / 2, nTime)
    # Domain D: call the routines
    Vertices, Triangles, Edges = read_off.readOff(nameFileD)
    areaTriangles, angleTriangles, baseFunction = geometricQuantities(
        Vertices, Triangles, Edges)
    gradientDMatrix, divergenceDMatrix, LaplacianDMatrix = geometricMatrices(
        Vertices, Triangles, Edges, areaTriangles, angleTriangles,
        baseFunction)
    originTriangles, areaVertices, vertexTriangles = trianglesToVertices(
        Vertices, Triangles, areaTriangles)

    # Size of the domain D
    nVertices = Vertices.shape[0]
    nTriangles = Triangles.shape[0]
    nEdges = Edges.shape[0]

    # Vectorized quantities
    # These allow routines defined only on D to be applied in parallel over [0,1] x D.

    # Vectorized arrays
    areaVectorized = np.kron(np.kron(np.ones(6 * nTime), areaTriangles),
                             np.ones(3)).reshape(nTime, 2, 3, nTriangles, 3)
    areaVerticesGlobal = np.kron(np.ones(nTime), areaVertices).reshape(
        (nTime, nVertices))
    areaVerticesGlobalStaggerred = np.kron(np.ones(nTime + 1),
                                           areaVertices).reshape(
                                               (nTime + 1, nVertices))

    # Vectorized matrices
    vertexTrianglesGlobal = scsp.kron(scsp.eye(nTime), vertexTriangles)
    originTrianglesGlobal = scsp.kron(scsp.eye(nTime), originTriangles)

    # Data structure with all the relevant informations. To be used as an argument of functions
    geomDic = {
        "nTime": nTime,
        "DeltaTime": DeltaTime,
        "Vertices": Vertices,
        "Triangles": Triangles,
        "Edges": Edges,
        "areaTriangles": areaTriangles,
        "gradientDMatrix": gradientDMatrix,
        "divergenceDMatrix": divergenceDMatrix,
        "LaplacianDMatrix": LaplacianDMatrix,
        "originTriangles": originTriangles,
        "areaVertices": areaVertices,
        "vertexTriangles": vertexTriangles,
        "nVertices": nVertices,
        "nTriangles": nTriangles,
        "nEdges": nEdges,
        "areaVerticesGlobal": areaVerticesGlobal,
        "areaVectorized": areaVectorized,
    }

    # Build the Laplacian matrix in space time and its inverse
    LaplacianInvert = laplacian_inverse.buildLaplacianMatrix(geomDic, eps)

    # Variable initialization
    # Primal variable phi.
    # Staggered in time, defined on the vertices of D
    phi = np.zeros((nTime + 1, nVertices))
    # Lagrange multiplier associated to mu.
    # Centered in Time and lives on the vertices of D
    mu = np.zeros((nTime, nVertices))
    # Momentum E, Lagrange multiplier.
    # Centered in time; the second component indicates on which side of the
    # temporal interval it comes from, the third the origin of the triangle
    # on which it sits, and the fourth the triangle itself. The last
    # component corresponds to the fact that we are looking at a vector of R^3.
    E = np.zeros((nTime, 2, 3, nTriangles, 3))

    # Primal Variable A, corresponds to d_t phi.
    # Same staggering pattern as mu
    A = np.zeros((nTime, nVertices))
    # Primal variable B, same pattern as E
    B = np.zeros((nTime, 2, 3, nTriangles, 3))
    # Lagrange multiplier associated to the congestion. If there is no
    # congestion, the value of this parameter will always stay at 0.
    lambdaC = np.zeros((nTime, nVertices))
    # Making sure the boundary values are normalized
    mub0 /= np.sum(mub0)
    mub1 /= np.sum(mub1)
    # Build the boundary term
    BT = np.zeros((nTime + 1, nVertices))
    BT[0, :] = -mub0
    BT[-1, :] = mub1

    # ADMM iterations
    # Value of the "augmentation parameter" for the augmented Lagragian problem (update dynamically)
    r = 1.

    # Initialize the array which will contain the values of the objective functional
    if detailStudy:
        objectiveValue = np.zeros(3 * Nit)
    else:
        objectiveValue = np.zeros((Nit // 10))

    # Initialize the array which will contain the residuals
    primalResidual = np.zeros(Nit)
    dualResidual = np.zeros(Nit)

    # Main Loop
    for counterMain in range(Nit):
        if verbose:
            print(30 * "-" + " Iteration " + str(counterMain + 1) + " " +
                  30 * "-")

        if detailStudy:
            objectiveValue[3 * counterMain] = objectiveFunctional(
                phi, mu, A, E, B, lambdaC, BT, geomDic, r, cCongestion,
                isCongestion)
        elif (counterMain % 10) == 0:
            objectiveValue[counterMain // 10] = objectiveFunctional(
                phi, mu, A, E, B, lambdaC, BT, geomDic, r, cCongestion,
                isCongestion)

        # Laplace problem
        startLaplace = time.time()
        # Build the RHS
        RHS = np.zeros((nTime + 1, nVertices))
        RHS -= BT * nTime
        RHS -= gradATime(mu, geomDic)
        RHS += np.multiply(r * gradATime(A + lambdaC, geomDic),
                           areaVerticesGlobalStaggerred / 3)
        # We take the adjoint wrt the scalar product weighted by areas, hence the multiplication by areaVectorized
        RHS -= divergenceD(E, geomDic)
        RHS += r * divergenceD(np.multiply(B, areaVectorized), geomDic)

        # Solve the system
        phi = 1. / r * LaplacianInvert(RHS)
        endLaplace = time.time()
        if verbose:
            print("Solving the Laplace system: {}s.".format(
                round(endLaplace - startLaplace, 2)))

        if detailStudy:
            objectiveValue[3 * counterMain + 1] = objectiveFunctional(
                phi, mu, A, E, B, lambdaC, BT, geomDic, r, cCongestion,
                isCongestion)

        # Projection over a convex set ---------------------------------------------------------
        # Projects onto the set A + 1/2 |B|^2 <= 0. We reduce to a 1D projection, then use a Newton method with a fixed number of iterations.
        startProj = time.time()

        # Computing the derivatives of phi
        dTphi = gradTime(phi, geomDic)
        dDphi = gradientD(phi, geomDic)

        # Computing what there is to project
        toProjectA = dTphi + 3. / r * np.divide(mu, areaVerticesGlobal)
        toProjectB = dDphi + 1. / r * np.divide(E, areaVectorized)

        # bSquaredArray will contain
        # (sum_{a ~ v} |a| |B_{a,v}|**2) / (4*sum_{a ~ v} |a|)
        # for each vertex v
        bSquaredArray = np.zeros((nTime, nVertices))
        # square and sum to take into account the Euclidean norm and the temporal average
        squareAux = np.sum(np.square(toProjectB), axis=(1, 4))
        # average wrt triangles
        bSquaredArray = originTrianglesGlobal.dot(
            squareAux.reshape(nTime * 3 * nTriangles)).reshape(
                (nTime, nVertices))
        # divide by the sum of the areas of the neighboring triangles
        bSquaredArray = np.divide(bSquaredArray, 4 * areaVerticesGlobal)
        # Value of the objective functional. For the points not in the convex, we want it to vanish.
        projObjective = toProjectA + bSquaredArray
        # projDiscriminating is 1 if the point needs to be projected, 0 if it is already in the convex set
        projDiscriminating = (np.greater(
            projObjective, 10**(-16) * np.ones(
                (nTime, nVertices)))).astype(float)

        # Newton method iteration
        # Value of the Lagrange multiplier. Initialized at 0, not updated if already in the convex set
        xProj = np.zeros((nTime, nVertices))
        counterProj = 0

        # Newton's loop
        while np.max(projObjective) > 10**(-10) and counterProj < 20:
            # Objective functional
            projObjective = (toProjectA + 6 * (1. + cCongestion * r) * xProj +
                             np.divide(bSquaredArray, np.square(1 - xProj)))
            # Derivative of the objective functional
            dProjObjective = 6 * (1. + cCongestion * r) - 2. * np.divide(
                bSquaredArray, np.power(xProj - 1, 3))
            # Update of xProj
            xProj -= np.divide(np.multiply(projDiscriminating, projObjective),
                               dProjObjective)
            counterProj += 1

        # Update of A
        A = toProjectA + 6 * (1. + cCongestion * r) * xProj
        # Update of lambda
        lambdaC = -6 * cCongestion * r * xProj
        # Transfer xProj, which is defined on vertices into something which is defined on triangles
        xProjTriangles = np.kron(
            vertexTrianglesGlobal.dot(xProj.reshape(
                nTime * nVertices)).reshape((nTime, 3, nTriangles)),
            np.ones(3),
        ).reshape((nTime, 3, nTriangles, 3))

        # Update of B
        B[:, 0, :, :, :] = np.divide(toProjectB[:, 0, :, :, :],
                                     1. - xProjTriangles)
        B[:, 1, :, :, :] = np.divide(toProjectB[:, 1, :, :, :],
                                     1. - xProjTriangles)

        # Print the info
        endProj = time.time()
        if verbose:
            print("Pointwise projection: {}s.".format(
                str(round(endProj - startProj, 2))))
            print("{} iterations needed; error committed: {}.".format(
                counterProj, np.max(projObjective)))
        if detailStudy:
            objectiveValue[3 * counterMain + 2] = objectiveFunctional(
                phi, mu, A, E, B, lambdaC, BT, geomDic, r, cCongestion,
                isCongestion)

        # Gradient descent in (E,muTilde), i.e. in the dual
        # No need to recompute the derivatives of phi
        # Update for mu
        mu -= r / 3 * np.multiply(areaVerticesGlobal, A + lambdaC - dTphi)
        # Update for E
        E -= r * np.multiply(areaVectorized, B - dDphi)

        # Compute the residuals
        # For the primal residual, just what was updated in the dual
        primalResidual[counterMain] = sqrt((scalarProductFun(
            A + lambdaC - dTphi,
            np.multiply(A + lambdaC - dTphi, areaVerticesGlobal / 3.0),
            geomDic,
        ) + scalarProductTriangles(B - dDphi, B - dDphi, geomDic)) /
                                           np.sum(areaTriangles))
        # For the dual residual, take the RHS of the Laplace system and keep only
        # BT and the dual variables mu, E
        dualResidualAux = np.zeros((nTime + 1, nVertices))
        dualResidualAux += BT / DeltaTime
        dualResidualAux += gradATime(mu, geomDic)
        dualResidualAux += divergenceD(E, geomDic)

        dualResidual[counterMain] = r * sqrt(
            scalarProductFun(
                dualResidualAux,
                np.multiply(dualResidualAux,
                            areaVerticesGlobalStaggerred / 3.0),
                geomDic,
            ) / np.sum(areaTriangles))
        # Break early if residuals are small
        if primalResidual[counterMain] < tol and dualResidual[
                counterMain] < tol:
            break
        # Update the parameter r
        # cf. Boyd et al. for an explanation of the rule
        if primalResidual[counterMain] >= 10 * dualResidual[counterMain]:
            r *= 2
        elif 10 * primalResidual[counterMain] <= dualResidual[counterMain]:
            r /= 2
        # Printing some results
        if verbose:
            if detailStudy:
                print("Maximizing in phi, should go up: {}".format(
                    objectiveValue[3 * counterMain + 1] -
                    objectiveValue[3 * counterMain]))
                print("Maximizing in A,B, should go up: {}".format(
                    objectiveValue[3 * counterMain + 2] -
                    objectiveValue[3 * counterMain + 1]))
                if counterMain >= 1:
                    print("Dual update: should go down: {}".format(
                        objectiveValue[3 * counterMain] -
                        objectiveValue[3 * counterMain - 1]))
            print("Values of phi: {}/{}\n".format(np.max(phi), np.min(phi)))
            print("Values of A: {}/{}\n".format(np.max(A), np.min(A)))
            print("Values of mu: {}/{}\n".format(np.max(mu), np.min(mu)))
            print("Values of E: {}/{}\n".format(np.max(E), np.min(E)))
            if isCongestion:
                print("Congestion")
                print(
                    scalarProductFun(lambdaC, mu, geomDic) - 1 /
                    (2. * cCongestion) * scalarProductFun(
                        lambdaC, np.multiply(lambdaC, areaVerticesGlobal /
                                             3.), geomDic))
                print(cCongestion / 2. *
                      np.sum(np.divide(np.square(mu), 1 / 3. * areaVertices)) /
                      nTime)

    # Print some information at the end of the loop
    if verbose:
        print("Final value of the augmenting parameter: {}".format(r))
        # Integral of mu wrt space (depends on time), should sum up to 1.
        intMu = np.sum(mu, axis=(-1))
        print("Minimal and maximal value of int mu: {}/{}".format(
            np.min(intMu), np.max(intMu)))
        print("Maximal and minimal value of mu: {}/{}".format(
            np.min(mu), np.max(mu)))

        dTphi = gradTime(phi, geomDic)
        dDphi = gradientD(phi, geomDic)
        print("Agreement between nabla_t,D and (A,B)")
        print(np.max(np.abs(dTphi - A)))
        print(np.max(np.abs(dDphi - B)))

    endProgramm = time.time()
    print("Primal/dual residuals at end: {}/{}".format(
        primalResidual[counterMain], dualResidual[counterMain]))
    print("Congestion norm: {}".format(
        np.linalg.norm(lambdaC - cCongestion * (mu / (areaVertices / 3)))))
    print("Objective value at end: {}".format(objectiveValue[counterMain //
                                                             10]))
    print("Total number of iterations: {}".format(counterMain))
    print("Total time taken by the computation of the geodesic: {}".format(
        round(endProgramm - startImport, 2)))
    return phi, mu, A, E, B, objectiveValue, primalResidual, dualResidual
Example #59
data = np.tile([-1,1],length)
row = np.arange(length).repeat(2)

col=[]
j=0
for track in moving:
    end = j+track.shape[0]
    col.append(np.arange(j,end).repeat(2)[1:-1])
    j = end
col = np.concatenate(col)

M = sparse.csr_matrix((data,(row,col)),(length,con_moving.shape[0]))

''' Get G '''
# G = sparse.diags([1,1,1,lamb])
MG = alpha*sparse.kron(M,sparse.diags([1,1,1,lamb])).tocsr()
''' Get Zeros '''
zer = np.zeros((MG.shape[0],3))

A = sparse.vstack([MG,WD])
B = np.vstack([zer,WU])

X = np.zeros((A.shape[1],3))
acon = np.zeros((3))

for i in range(3):
    result = np.array(lsqr(A,B[:,i]))
    X[:,i] = result[0]
    acon[i] = result[6]
print(acon)
Example #60
    def BuildHeisenberg(self):
        lat = self.Lat

        for bond in range(0, lat.Number1neigh):
            for i in range(0, self.Nsite):
                j = lat.nn_[i, bond]
                # print(j)
                if i < j and j >= 0:
                    # Kxx_ij * S_i^x S_j^x
                    self.KxxGraph_[i, j] = self.Kxx
                    self.KxxGraph_[j, i] = self.Kxx

                    # Kyy_ij * S_i^y S_j^y
                    self.KyyGraph_[i, j] = self.Kyy
                    self.KyyGraph_[j, i] = self.Kyy

                    # Kzz_ij * S_i^z S_j^z
                    self.KzzGraph_[i, j] = self.Kzz
                    self.KzzGraph_[j, i] = self.Kzz

        print("\nKxxGraph_:")
        matprint(self.KxxGraph_)
        print("\nKyyGraph_:")
        matprint(self.KyyGraph_)
        print("\nKzzGraph_:")
        matprint(self.KzzGraph_)

        # matprint(self.KxxGraph_-self.KzzGraph_); matprint(self.KxxGraph_-self.KyyGraph_)

        self.KxxPair_, self.Kxxcoef_ = PairConstructor(self.KxxGraph_,
                                                       self.Nsite)
        self.KyyPair_, self.Kyycoef_ = PairConstructor(self.KyyGraph_,
                                                       self.Nsite)
        self.KzzPair_, self.Kzzcoef_ = PairConstructor(self.KzzGraph_,
                                                       self.Nsite)
        # print(self.KzzPair_)
        # print(self.Kzzcoef_)
        # ---------------------Build Hamiltonian as Sparse Matrix-------------------

        print("[Hamiltonian.py] Building Hamiltonian as Sparse Matrix...")
        Spins = Dofs("SpinHalf")
        sx = Spins.Sx
        sy = Spins.Sy
        sz = Spins.Sz

        Hamx = TwoSpinOps(self.KxxPair_, self.Kxxcoef_, sx, sx, self.Nsite)
        Hamy = TwoSpinOps(self.KyyPair_, self.Kyycoef_, sy, sy, self.Nsite)
        Hamz = TwoSpinOps(self.KzzPair_, self.Kzzcoef_, sz, sz, self.Nsite)

        Ham = Hamx + Hamy + Hamz

        # --------------------------- Add external field -------------------------

        for i in range(0, self.Nsite):
            ida = sp.eye(2**i)
            idb = sp.eye(2**(self.Nsite - i - 1))
            Ham += sp.kron(ida, sp.kron(sx, idb)) * self.Hx
            Ham += sp.kron(ida, sp.kron(sy, idb)) * self.Hy
            Ham += sp.kron(ida, sp.kron(sz, idb)) * self.Hz

        return Ham
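The external-field loop above relies on the standard single-site embedding S_i = I_(2^i) (x) s (x) I_(2^(N-i-1)); a small added check for a hypothetical 3-site spin-1/2 chain.

import numpy as np
import scipy.sparse as sp

sz = sp.csr_matrix(np.diag([0.5, -0.5]))
Nsite, i = 3, 1
op = sp.kron(sp.eye(2**i), sp.kron(sz, sp.eye(2**(Nsite - i - 1))))
assert op.shape == (8, 8)
# diagonal entries: the z-projection of site 1 across all basis states
diag = op.diagonal()
assert np.allclose(diag, np.kron(np.ones(2), np.kron([0.5, -0.5], np.ones(2))))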