def test_Nup(n,m,S):
    nmax = int(eval("2*"+S))
    N = n**2 + m**2
    # Nups=range(nmax*N+1)
    Nups = [2]
    a_1 = np.array([0,1])
    a_2 = np.array([1,0])
    T1,t1,T2,t2,Pr,pr,Tx,Ty = tilted_square_transformations(n,m,a_1,a_2)
    Jzz = [[-1.0,i,Tx[i]] for i in range(N)]
    Jzz.extend([-1.0,i,Ty[i]] for i in range(N))
    hx = [[-1.0,i] for i in range(N)]
    fzz = lambda x:1-x
    fx = lambda x:x
    dynamic=[["zz",Jzz,fzz,()],["+-",Jzz,fx,()],["-+",Jzz,fx,()]]
    ss = np.linspace(0,1,11)
    for Nup in Nups:
        basis_full = spin_basis_general(N,S=S,Nup=Nup)
        H_full = hamiltonian([],dynamic,basis=basis_full,dtype=np.float64)
        E_full = []
        for s in ss:
            E_full.append(H_full.eigvalsh(time=s))
        E_full = np.vstack(E_full)
        E_symm = np.zeros((E_full.shape[0],0),dtype=E_full.dtype)
        no_checks = dict(check_symm=False,check_pcon=False,check_herm=False)
        for blocks in get_blocks(T1,t1,T2,t2,Pr,pr):
            basis = spin_basis_general(N,S=S,Nup=Nup,**blocks)
            H = hamiltonian([],dynamic,basis=basis,dtype=np.complex128,**no_checks)
            if H.Ns == 0:
                continue
            block,values = zip(*blocks.items())
            Tr,qs = zip(*values)
            print(basis.Ns,Nup,list(zip(block,qs)))
            for Hd in H._dynamic.values():
                dH = (Hd-Hd.T.conj())
                msg = "{} {} {}".format(basis.Ns,Nup,list(zip(block,qs)))  # renamed from n to avoid shadowing the argument n
                np.testing.assert_allclose(dH.data,0,atol=1e-7,err_msg=msg)
            E_list = []
            for i,s in enumerate(ss):
                E = H.eigvalsh(time=s)
                E_list.append(E)
            E_list = np.vstack(E_list)
            E_symm = np.hstack((E_symm,E_list))
        E_symm.sort(axis=1)
        np.testing.assert_allclose(E_symm,E_full,atol=1e-13)
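# A minimal driver sketch (an assumption, not part of the original test file): run the
# symmetry-block consistency check above for a small tilted cluster. It assumes
# tilted_square_transformations and get_blocks are the repo's own helpers, imported
# alongside this test.
if __name__ == "__main__":
    test_Nup(2, 1, "1/2")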
def get_operators(size, Nb):
    n, m = size
    if Nb % 2 == 1:
        S = "{}/2".format(Nb)
    else:
        S = "{}".format(Nb // 2)
    bath_basis = spin_basis_general(1, S=S)
    N = n**2 + m**2
    Ns_block_est = max(2**N // N, 1000)  # integer estimate of the block size
    if n != 0:
        T1, t1, T2, t2, Pr, pr, Tx, Ty = tilted_square_transformations(n, m)
        blocks = dict(tx=(Tx, 0), ty=(Ty, 0), pb=(Pr, 0))
        spin_basis = spin_basis_general(N, S="1/2", pauli=True, Ns_block_est=Ns_block_est, **blocks)
    else:
        L = m
        tr = square_lattice_trans(L, L)
        Tx = tr.T_x
        Ty = tr.T_y
        blocks = dict(tx=(Tx, 0), ty=(Ty, 0), px=(tr.P_x, 0), py=(tr.P_y, 0), pd=(tr.P_d, 0))
        spin_basis = spin_basis_general(N, S="1/2", pauli=True, Ns_block_est=Ns_block_est, **blocks)
    basis = tensor_basis(spin_basis, bath_basis)
    J_list = [[-1.0, i, Tx[i]] for i in range(N)]
    J_list.extend([-1.0, i, Ty[i]] for i in range(N))
    M_list = [[1.0 / N**2, i, i] for i in range(N)]
    M_list += [[2.0 / N**2, i, j] for i in range(N) for j in range(N) if i > j]
    kwargs = dict(basis=basis, dtype=np.float64, check_symm=False, check_pcon=False, check_herm=False)
    print(size)
    H_S = hamiltonian([["zz|", J_list]], [], **kwargs)
    M2 = hamiltonian([["zz|", M_list]], [], **kwargs)
    return H_S, M2
def test(S,Lx,Ly):
    N = Lx*Ly
    nmax = int(eval("2*"+S))
    sps = nmax+1
    tr = square_lattice_trans(Lx,Ly)
    basis_dict = {}
    Nups = range(nmax*N+1)
    for Nup in Nups:
        basis_blocks = []
        pcon_basis = spin_basis_general(N,Nup=Nup,S=S)
        Ns_block = 0
        for blocks in tr.allowed_blocks_spin_inversion_iter(Nup,sps):
            basis = spin_basis_general(N,Nup=Nup,S=S,**blocks)
            Ns_block += basis.Ns
            basis_blocks.append(basis)
        try:
            assert(Ns_block == pcon_basis.Ns)
        except AssertionError:
            print(Nup,Ns_block,pcon_basis.Ns)
            raise AssertionError("reduced blocks don't sum to particle sector.")
        basis_dict[Nup] = (pcon_basis,basis_blocks)
    J = [[1.0,i,tr.T_x[i]] for i in range(N)]
    J.extend([[1.0,i,tr.T_y[i]] for i in range(N)])
    static = [["zz",J],["+-",J],["-+",J]]
    E_symm = {}
    for Nb,(pcon_basis,basis_blocks) in basis_dict.items():
        H_pcon = hamiltonian(static,[],basis=pcon_basis,dtype=np.float64)
        if H_pcon.Ns>0:
            E_pcon = np.linalg.eigvalsh(H_pcon.todense())
        else:
            E_pcon = np.array([])
        E_block = []
        for basis in basis_blocks:
            H = hamiltonian(static,[],basis=basis,dtype=np.complex128)
            if H.Ns>0:
                E_block.append(np.linalg.eigvalsh(H.todense()))
        E_block = np.hstack(E_block)
        E_block.sort()
        np.testing.assert_allclose(E_pcon,E_block,atol=1e-13)
        print("passed Nb={} sector".format(Nb))
def auto_correlator_symm(L, times, S="1/2"):
    # define momentum p sector of the GS of the Heisenberg Hamiltonian
    if (L // 2) % 2:
        p = L // 2  # corresponds to momentum pi
        dtype = np.complex128
    else:
        p = 0
        dtype = np.float64
    #
    # define translation operator
    T = (np.arange(L) + 1) % L
    # compute the basis in the momentum sector of the GS of the Heisenberg model
    basis_p = spin_basis_general(L, S=S, m=0, kblock=(T, p), pauli=False)
    # define Heisenberg Hamiltonian
    no_checks = dict(check_symm=False, check_herm=False, check_pcon=False)
    H = hamiltonian(static, [], basis=basis_p, dtype=dtype, **no_checks)
    # compute GS
    E, V = H.eigsh(k=1, which="SA")
    psi_GS = V[:, 0]
    # evolve GS under symmetry-reduced H (gives a trivial phase factor)
    psi_GS_t = H.evolve(psi_GS, 0, times)
    #
    ##### compute autocorrelation function for every momentum sector
    Cq_t = np.zeros((times.shape[0], L), dtype=np.complex128)
    #
    for q in range(L):  # sum over symmetry sectors
        #
        ###### define operator O_q as a sum over lattice sites
        op_list = [["z", [j], (np.sqrt(2.0) / L) * np.exp(-1j * 2.0 * np.pi * q * j / L)] for j in range(L)]
        # compute basis in the (q+p)-momentum sector (the total momentum of O_q|psi_GS> is q+p)
        basis_q = spin_basis_general(L, S=S, m=0, kblock=(T, p + q), pauli=False)
        # define Hamiltonian in the q-momentum sector
        Hq = hamiltonian(static, [], basis=basis_q, dtype=np.complex128, **no_checks)
        # use Op_shift_sector to apply operator O_q to GS; the momentum of the new state is p+q
        Opsi_GS = basis_q.Op_shift_sector(basis_p, op_list, psi_GS)
        # time evolve Opsi_GS under H_q
        Opsi_GS_t = Hq.evolve(Opsi_GS, 0.0, times)
        # apply operator O on time-evolved psi_t
        O_psi_GS_t = basis_q.Op_shift_sector(basis_p, op_list, psi_GS_t)
        # compute autocorrelator for every momentum sector
        Cq_t[..., q] = np.einsum("ij,ij->j", O_psi_GS_t.conj(), Opsi_GS_t)
    #
    return np.sum(Cq_t, axis=1)  # sum over momentum sectors
def corr_symm(L, times, S="1/2"):
    J_list = [[1.0, i, (i + 1) % L] for i in range(L)]
    static = [[op, J_list] for op in ["-+", "+-", "zz"]]
    if (L // 2) % 2:
        q0 = L // 2
        dtype = np.complex128
    else:
        q0 = 0
        dtype = np.float64
    t = (np.arange(L) + 1) % L
    basis = spin_basis_general(L, S=S, m=0, kblock=(t, q0), pauli=False)
    kwargs = dict(basis=basis, dtype=dtype, check_symm=False, check_herm=False, check_pcon=False)
    H = hamiltonian(static, [], **kwargs)
    E, V = H.eigsh(k=1, which="SA")
    psi0 = V[:, 0]
    sqs = []
    psi0_t = H.evolve(psi0.ravel(), 0, times)
    for q in range(L):
        op_pq = [["z", [i], (2.0 / L) * np.exp(-2j * np.pi * q * i / L)] for i in range(L)]
        basis_q = spin_basis_general(L, S=S, m=0, kblock=(t, q0 + q), pauli=False)
        kwargs = dict(basis=basis_q, dtype=np.complex128, check_symm=False, check_herm=False, check_pcon=False)
        Hq = hamiltonian(static, [], **kwargs)
        psi1 = basis_q.Op_shift_sector(basis, op_pq, psi0)
        psi1_t = Hq.evolve(psi1, 0, times)
        psi2_t = basis_q.Op_shift_sector(basis, op_pq, psi0_t)
        sqs.append(np.einsum("ij,ij->j", psi2_t.conj(), psi1_t))
    return sum(sqs)
def get_H(L, pblock=None, zblock=None):
    p = np.arange(L)[::-1]
    z = -(np.arange(L) + 1)
    blocks = {}
    if pblock is not None:
        blocks["pblock"] = (p, pblock)
    if zblock is not None:
        blocks["zblock"] = (z, zblock)
    basis = spin_basis_general(L, m=0, pauli=False, **blocks)
    Jzz_list = [[1.0, i, (i + 1) % L] for i in range(L)]
    Jxy_list = [[0.5, i, (i + 1) % L] for i in range(L)]
    static = [[op, Jxy_list] for op in ["+-", "-+"]] + [["zz", Jzz_list]]
    kwargs = dict(basis=basis, dtype=np.float64, check_symm=False, check_herm=False, check_pcon=False)
    H_LO = quantum_LinearOperator(static, **kwargs)
    H = hamiltonian(static, [], **kwargs)
    return H_LO, H
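# A minimal usage sketch (an assumption, not part of the original file): build the two
# objects above for a small chain and check that the matrix-free quantum_LinearOperator
# and the stored hamiltonian act identically on a random vector.
if __name__ == "__main__":
    H_LO, H = get_H(8, pblock=0, zblock=0)
    v = np.random.normal(size=H.Ns)
    np.testing.assert_allclose(H_LO.dot(v), H.dot(v), atol=1e-13)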
def corr_nosymm(L, times, S="1/2"):
    J_list = [[1.0, i, (i + 1) % L] for i in range(L)]
    static = [[op, J_list] for op in ["-+", "+-", "zz"]]
    basis = spin_basis_general(L, S=S, m=0, pauli=False)
    kwargs = dict(basis=basis, dtype=np.float64, check_symm=False, check_herm=False, check_pcon=False)
    H = hamiltonian(static, [], **kwargs)
    E, V = H.eigsh(k=1, which="SA")
    psi0 = V[:, 0]
    psi0_t = H.evolve(psi0, 0, times)
    sqs = []
    op_list = [["z", [0], 2.0]]
    # import inspect
    # print(inspect.getsource(basis.inplace_Op))
    psi1 = basis.inplace_Op(psi0, op_list, np.float64)
    psi1_t = H.evolve(psi1, 0, times)
    psi2_t = basis.inplace_Op(psi0_t, op_list, np.float64)
    sqs.append(np.einsum("ij,ij->j", psi2_t.conj(), psi1_t))
    return sum(sqs)
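# A hedged consistency sketch (assumed, not part of the original script): for a
# translationally invariant chain the momentum-resolved correlator corr_symm and the
# plain correlator corr_nosymm above should agree, so comparing them on a small system
# is a cheap sanity check.
if __name__ == "__main__":
    times = np.linspace(0.0, 1.0, 11)
    np.testing.assert_allclose(corr_symm(8, times), corr_nosymm(8, times), atol=1e-10)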
def auto_correlator(L, times, S="1/2"):
    # construct basis in zero magnetization sector: no lattice symmetries
    basis = spin_basis_general(L, S=S, m=0, pauli=False)
    # define Heisenberg Hamiltonian
    no_checks = dict(check_symm=False, check_herm=False, check_pcon=False)
    H = hamiltonian(static, [], basis=basis, dtype=np.float64, **no_checks)
    # compute GS
    E, V = H.eigsh(k=1, which="SA")
    psi_GS = V[:, 0]
    # evolve GS under H (gives a trivial phase factor)
    psi_GS_t = H.evolve(psi_GS, 0.0, times)
    #
    ###### define operator O to compute the autocorrelation function of
    #
    op_list = [["z", [0], np.sqrt(2.0)]]
    # use inplace_Op to apply operator O on psi_GS
    Opsi_GS = basis.inplace_Op(psi_GS, op_list, np.float64)
    # time evolve Opsi_GS under H
    Opsi_GS_t = H.evolve(Opsi_GS, 0.0, times)
    # apply operator O on time-evolved psi_t
    O_psi_GS_t = basis.inplace_Op(psi_GS_t, op_list, np.float64)
    # compute autocorrelator
    C_t = np.einsum("ij,ij->j", O_psi_GS_t.conj(), Opsi_GS_t)
    #
    return C_t
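# A hedged comparison sketch (an assumption, not part of the original example): the
# symmetry-reduced and plain autocorrelators defined above should coincide for a
# translationally invariant ground state. It assumes `static` (the Heisenberg coupling
# list used by both functions) is defined at module scope, as in the surrounding script.
if __name__ == "__main__":
    times = np.linspace(0.0, 2.0, 21)
    C = auto_correlator(6, times)
    C_symm = auto_correlator_symm(6, times)
    np.testing.assert_allclose(C, C_symm, atol=1e-10)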
def make_basis(N_half):
    """ Generates a list of integers to represent external, user-imported basis """
    old_basis = spin_basis_general(N_half, m=0)
    #
    states = old_basis.states
    shift_states = np.left_shift(states, N_half)
    #
    shape = states.shape + states.shape
    #
    states_b = np.broadcast_to(states, shape)
    shift_states_b = np.broadcast_to(shift_states, shape)
    # this does the kronecker sum in a more memory efficient way.
    return (states_b + shift_states_b.T).ravel()
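# A small sanity-check sketch (an assumption, not part of the original file): the
# kronecker-sum construction above should return old_basis.Ns**2 distinct integers,
# one for every pair of half-system states.
if __name__ == "__main__":
    N_half = 4
    states = make_basis(N_half)
    Ns_half = spin_basis_general(N_half, m=0).Ns
    assert states.shape[0] == Ns_half**2
    assert np.unique(states).shape[0] == states.shape[0]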
def exact_diag(J,Hx,Hz,Lx,Ly):
    N_2d = Lx*Ly # number of sites
    ###### setting up user-defined symmetry transformations for 2d lattice ######
    s = np.arange(N_2d) # sites [0,1,2,....]
    x = s%Lx # x positions for sites
    y = s//Lx # y positions for sites
    T_x = (x+1)%Lx + Lx*y # translation along x-direction
    T_y = x +Lx*((y+1)%Ly) # translation along y-direction
    mT_y = x +Lx*((y+Ly-1)%Ly) # inverse translation along y-direction
    P_x = x + Lx*(Ly-y-1) # reflection about x-axis
    P_y = (Lx-x-1) + Lx*y # reflection about y-axis
    Z = -(s+1) # spin inversion
    ###### setting up bases ######
    # basis_2d = spin_basis_general(N=N_2d,S="1/2",pauli=0)
    basis_2d = spin_basis_general(N=N_2d,S="1/2",pauli=0,kxblock=(T_x,0),kyblock=(T_y,0))
    ###### setting up hamiltonian ######
    # setting up site-coupling lists
    Jzzs = [[J,i,T_x[i]] for i in range(N_2d)]+[[J,i,T_y[i]] for i in range(N_2d)]
    Hxs = [[-Hx,i] for i in range(N_2d)]
    Hzs = [[-Hz,i] for i in range(N_2d)]
    static = [["zz",Jzzs],["x",Hxs],["z",Hzs]]
    # build hamiltonian
    # H = hamiltonian(static,[],static_fmt="csr",basis=basis_2d,dtype=np.float64)
    no_checks = dict(check_symm=False, check_pcon=False, check_herm=False)
    H = hamiltonian(static,[],static_fmt="csr",basis=basis_2d,dtype=np.float64,**no_checks)
    # diagonalise H
    ene,vec = H.eigsh(time=0.0,which="SA",k=2)
    # ene = H.eigsh(time=0.0,which="SA",k=2,return_eigenvectors=False); ene = np.sort(ene)
    norm2 = np.linalg.norm(vec[:,0])**2
    # calculate uniform magnetization
    int_mx = [[1.0,i] for i in range(N_2d)]
    int_mz = [[1.0,i] for i in range(N_2d)]
    static_mx = [["x",int_mx]]
    static_mz = [["z",int_mz]]
    op_mx = hamiltonian(static_mx,[],static_fmt="csr",basis=basis_2d,dtype=np.float64,**no_checks).tocsr(time=0)
    op_mz = hamiltonian(static_mz,[],static_fmt="csr",basis=basis_2d,dtype=np.float64,**no_checks).tocsr(time=0)
    mx = (np.conjugate(vec[:,0]).dot(op_mx.dot(vec[:,0])) / norm2).real / N_2d
    mz = (np.conjugate(vec[:,0]).dot(op_mz.dot(vec[:,0])) / norm2).real / N_2d
    # calculate n.n. sz.sz correlation
    int_mz0mz1 = [[1.0,i,T_x[i]] for i in range(N_2d)]+[[1.0,i,T_y[i]] for i in range(N_2d)]
    static_mz0mz1 = [["zz",int_mz0mz1]]
    op_mz0mz1 = hamiltonian(static_mz0mz1,[],static_fmt="csr",basis=basis_2d,dtype=np.float64,**no_checks).tocsr(time=0)
    mz0mz1 = (np.conjugate(vec[:,0]).dot(op_mz0mz1.dot(vec[:,0])) / norm2).real / N_2d
    # calculate sz(0,0).sz(1,1) correlation
    int_mz0mzsq2 = [[1.0,i,T_y[T_x[i]]] for i in range(N_2d)]+[[1.0,i,mT_y[T_x[i]]] for i in range(N_2d)]
    static_mz0mzsq2 = [["zz",int_mz0mzsq2]]
    op_mz0mzsq2 = hamiltonian(static_mz0mzsq2,[],static_fmt="csr",basis=basis_2d,dtype=np.float64,**no_checks).tocsr(time=0)
    mz0mzsq2 = (np.conjugate(vec[:,0]).dot(op_mz0mzsq2.dot(vec[:,0])) / norm2).real / N_2d
    return ene, mx, mz, mz0mz1, mz0mzsq2
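# A minimal driver sketch (assumed, not part of the original script): run the exact
# diagonalization above for a small 3x3 transverse-field cluster and print the results.
if __name__ == "__main__":
    ene, mx, mz, mz0mz1, mz0mzsq2 = exact_diag(J=1.0, Hx=0.5, Hz=0.0, Lx=3, Ly=3)
    print("lowest two energies:", ene)
    print("mx, mz:", mx, mz)
    print("nearest-neighbour and diagonal zz correlations:", mz0mz1, mz0mzsq2)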
def run_computation():
    #
    ###### define model parameters ######
    J1=1.0 # spin-spin interaction
    J2=0.5 # diagonal (next-nearest-neighbour) spin-spin interaction, modulated by the drive
    Omega=8.0 # drive frequency
    Lx, Ly = 4, 4 # linear dimension of spin-1/2 2d lattice
    N_2d = Lx*Ly # number of sites
    #
    ###### setting up user-defined symmetry transformations for 2d lattice ######
    sites = np.arange(N_2d) # sites [0,1,2,....]
    x = sites%Lx # x positions for sites
    y = sites//Lx # y positions for sites
    #
    T_x = (x+1)%Lx + Lx*y # translation along x-direction
    T_y = x +Lx*((y+1)%Ly) # translation along y-direction
    #
    T_a = (x+1)%Lx + Lx*((y+1)%Ly) # translation along anti-diagonal
    T_d = (x-1)%Lx + Lx*((y+1)%Ly) # translation along diagonal
    #
    ###### setting up bases ######
    basis_2d = spin_basis_general(N_2d,pauli=False) # making the basis is sped up by OpenMP
    print('finished computing basis')
    #
    ###### setting up hamiltonian ######
    # set up time-dependence
    def drive(t,Omega):
        return np.cos(Omega*t)
    drive_args=[Omega,]
    # setting up site-coupling lists
    J1_list=[[J1,i,T_x[i]] for i in range(N_2d)] + [[J1,i,T_y[i]] for i in range(N_2d)]
    J2_list=[[J2,i,T_d[i]] for i in range(N_2d)] + [[J2,i,T_a[i]] for i in range(N_2d)]
    #
    static =[ ["xx",J1_list],["yy",J1_list],["zz",J1_list] ]
    dynamic=[ ["xx",J2_list,drive,drive_args],["yy",J2_list,drive,drive_args],["zz",J2_list,drive,drive_args] ]
    # build hamiltonian (pass the dynamic list so the drive actually enters H)
    H=hamiltonian(static,dynamic,basis=basis_2d,dtype=np.float64,check_symm=False,check_herm=False)
    # diagonalise H
    E,V=H.eigsh(time=0.0,k=50,which='LA') # H.eigsh sped up by MKL
    print('finished computing energies')
    psi_0=V[:,0]
    # evolve state
    t=np.linspace(0.0,20*2*np.pi/Omega,21)
    psi_t=H.evolve(psi_0,t[0],t,iterate=True) # H.evolve sped up by OpenMP
    for j,psi in enumerate(psi_t):
        E_t = H.expt_value(psi,time=t[j])
        print("finished evolving up to time step {:d}".format(j))
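# A minimal entry-point sketch (assumed; the original script may wrap this differently)
# so the driven-lattice computation above can be launched directly.
if __name__ == "__main__":
    run_computation()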
def maxcut_to_quantum(structure, system_size, fill, interaction_shape, interaction_radius):
    prob = maxcut.initialize_problem(structure, system_size, fill, interaction_shape, interaction_radius)
    N = prob.get_num_vertices()
    basis = spin_basis_general(N)
    # Hamiltonian terms for Ising interactions and reference field
    J_zz = prob.get_edges()
    h_x = [[-1, i] for i in range(N)]
    # Hamiltonian for Ising interactions
    static = [["zz", J_zz]]
    dynamic = []
    H = hamiltonian(static, dynamic, basis=basis, dtype=np.float64)
    # reference Hamiltonian
    B = hamiltonian([["x", h_x]], [], dtype=np.float64, basis=basis, check_herm=False, check_symm=False)
    # initial state: all up in x basis (ground state of reference Hamiltonian)
    psi_0 = (1 / (2**(N / 2))) * np.ones(2**N, )  # all up in x basis
    # exact ground states
    states = util.get_states_str(system_size)
    ground_state_energy, num_ground_states, ground_states = classical_algorithms.BruteForce().solve(prob, allGroundStates=True)
    ground_states_id = []
    for ground_state in ground_states:
        ground_state_str = ''
        for v in range(N):
            ground_state_str += str(int((ground_state[v] + 1) / 2))
        ground_states_id.append(states.index(ground_state_str))
    return H, B, psi_0, ground_states_id
def get_operators(L):
    N = 2 * L
    Tx = (np.arange(L) + 1) % L
    Tx = np.hstack((Tx, Tx + L))
    P = np.arange(L)[::-1]
    P = np.hstack((P, P + L))
    basis = spin_basis_general(N, pauli=True, pblk=(P, 0), kblk=(Tx, 0))
    J_list = [[-1, i, (i + 1) % L] for i in range(L)]
    M_list = [[1.0 / L**2, i, j] for i in range(L) for j in range(L)]
    kwargs = dict(basis=basis, dtype=np.float64, check_symm=False, check_pcon=False, check_herm=False)
    print(basis.N)
    H_S = hamiltonian([["zz", J_list]], [], **kwargs)
    M2 = hamiltonian([["zz", M_list]], [], **kwargs)
    return H_S, M2
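# A minimal usage sketch (an assumption, not part of the original file): diagonalize the
# system Hamiltonian returned above for a small chain and evaluate the squared
# magnetization in its lowest eigenstate.
if __name__ == "__main__":
    H_S, M2 = get_operators(6)
    E0, psi0 = H_S.eigsh(k=1, which="SA")
    print("E0 =", E0[0], " <M^2> =", M2.expt_value(psi0[:, 0]).real)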
                                err_msg=err_msg)
    np.testing.assert_allclose(P.H.dot(v_full), basis.project_to(v_full, sparse=False), atol=1e-10, err_msg=err_msg)


L = 12
assert (L >= 3)

z = -(np.arange(L) + 1)
p = np.arange(L)[::-1]
t = (np.arange(L) + 1) % L

bases = [
    spin_basis_general(L),
    spin_basis_general(L, Nup=L // 2),
    spin_basis_general(L, zb=(z, 0)),
    spin_basis_general(L, zb=(z, 1)),
    spin_basis_general(L, pb=(p, 0)),
    spin_basis_general(L, pb=(p, 1)),
    spin_basis_general(L, zb=(z, 0), pb=(p, 0)),
    spin_basis_general(L, zb=(z, 0), pb=(p, 1)),
    spin_basis_general(L, zb=(z, 1), pb=(p, 0)),
    spin_basis_general(L, zb=(z, 1), pb=(p, 1)),
    spin_basis_general(L, zb=(z, 0), pb=(t, 0)),
    spin_basis_general(L, zb=(z, 0), pb=(t, 1)),
    spin_basis_general(L, zb=(z, 1), pb=(t, 0)),
    spin_basis_general(L, zb=(z, 1), pb=(t, 1)),
    spin_basis_general(L, zb=(z, 0), pb=(p, 0), tb=(t, 0)),
    spin_basis_general(L, zb=(z, 0), pb=(p, 0), tb=(t, L - 1)),
def prepare_H_vec(J, Hx, Hz, Lx, Ly):
    N_2d = Lx * Ly  # number of sites
    ###### setting up user-defined symmetry transformations for 2d lattice ######
    s = np.arange(N_2d)  # sites [0,1,2,....]
    x = s % Lx  # x positions for sites
    y = s // Lx  # y positions for sites
    T_x = (x + 1) % Lx + Lx * y  # translation along x-direction
    T_y = x + Lx * ((y + 1) % Ly)  # translation along y-direction
    mT_y = x + Lx * ((y + Ly - 1) % Ly)  # inverse translation along y-direction
    P_x = x + Lx * (Ly - y - 1)  # reflection about x-axis
    P_y = (Lx - x - 1) + Lx * y  # reflection about y-axis
    Z = -(s + 1)  # spin inversion
    ###### setting up bases ######
    # basis_2d = spin_basis_general(N=N_2d,S="1/2",pauli=0)
    basis_2d = spin_basis_general(N=N_2d, S="1/2", pauli=0, kxblock=(T_x, 0), kyblock=(T_y, 0))
    # print(basis_2d)
    # print(basis_2d.Ns)
    ###### prepare initial state (all down, Fock state |000...000> in QuSpin) ######
    ## http://weinbe58.github.io/QuSpin/generated/quspin.basis.spinful_fermion_basis_1d.html?highlight=partial%20trace#quspin.basis.spinful_fermion_basis_1d.index
    ## http://weinbe58.github.io/QuSpin/generated/quspin.basis.spin_basis_1d.html#quspin.basis.spin_basis_1d.index
    ## i0 = basis_2d.index("1111111111111111") # up
    # i0 = basis_2d.index("0000000000000000") # down
    s_down = "".join("0" for i in range(N_2d))
    i_down = basis_2d.index(s_down)
    vec = np.zeros(basis_2d.Ns, dtype=np.float64)
    vec[i_down] = 1.0
    # print(s_down)
    # print(i_down)
    # print(vec)
    ###### setting up hamiltonian ######
    # setting up site-coupling lists
    Jzzs = [[J, i, T_x[i]] for i in range(N_2d)] + [[J, i, T_y[i]] for i in range(N_2d)]
    Hxs = [[-Hx, i] for i in range(N_2d)]
    Hzs = [[-Hz, i] for i in range(N_2d)]
    static = [["zz", Jzzs], ["x", Hxs], ["z", Hzs]]
    # build hamiltonian
    # H = hamiltonian(static,[],static_fmt="csr",basis=basis_2d,dtype=np.float64)
    no_checks = dict(check_symm=False, check_pcon=False, check_herm=False)
    H = hamiltonian(static, [], static_fmt="csr", basis=basis_2d, dtype=np.float64, **no_checks)
    H = H.tocsr(time=0)
    # operator for uniform magnetization
    int_mx = [[1.0, i] for i in range(N_2d)]
    int_mz = [[1.0, i] for i in range(N_2d)]
    static_mx = [["x", int_mx]]
    static_mz = [["z", int_mz]]
    op_mx = hamiltonian(static_mx, [], static_fmt="csr", basis=basis_2d, dtype=np.float64, **no_checks).tocsr(time=0)
    op_mz = hamiltonian(static_mz, [], static_fmt="csr", basis=basis_2d, dtype=np.float64, **no_checks).tocsr(time=0)
    # operator for n.n. sz.sz correlation
    int_mz0mz1 = [[1.0, i, T_x[i]] for i in range(N_2d)] + [[1.0, i, T_y[i]] for i in range(N_2d)]
    static_mz0mz1 = [["zz", int_mz0mz1]]
    op_mz0mz1 = hamiltonian(static_mz0mz1, [], static_fmt="csr", basis=basis_2d, dtype=np.float64, **no_checks).tocsr(time=0)
    # operator for sz(0,0).sz(1,1) correlation
    int_mz0mzsq2 = [[1.0, i, T_y[T_x[i]]] for i in range(N_2d)] + [[1.0, i, mT_y[T_x[i]]] for i in range(N_2d)]
    static_mz0mzsq2 = [["zz", int_mz0mzsq2]]
    op_mz0mzsq2 = hamiltonian(static_mz0mzsq2, [], static_fmt="csr", basis=basis_2d, dtype=np.float64, **no_checks).tocsr(time=0)
    # operator for sz(0,0).sz(0,2) correlation
    int_mz0mz2 = [[1.0, i, T_x[T_x[i]]] for i in range(N_2d)] + [[1.0, i, T_y[T_y[i]]] for i in range(N_2d)]
    static_mz0mz2 = [["zz", int_mz0mz2]]
    op_mz0mz2 = hamiltonian(static_mz0mz2, [], static_fmt="csr", basis=basis_2d, dtype=np.float64, **no_checks).tocsr(time=0)
    return N_2d, H, vec, op_mx, op_mz, op_mz0mz1, op_mz0mzsq2, op_mz0mz2
def compare(static_list,basis,basis_op):
    for opstr,indx,J in static_list:
        ME,bra,ket = basis.Op_bra_ket(opstr,indx,J,np.float64,basis_op.states)
        ME_op,row,col = basis_op.Op(opstr,indx,J,np.float64)
        np.testing.assert_allclose(bra - basis_op[row],0.0,atol=1E-5,err_msg='failed bra/row in Op_bra_ket test!')
        np.testing.assert_allclose(ket - basis_op[col],0.0,atol=1E-5,err_msg='failed ket/col in Op_bra_ket test!')
        np.testing.assert_allclose(ME - ME_op,0.0,atol=1E-5,err_msg='failed ME in Op_bra_ket test!')


for Np in [None, 2, N_2d-1, [N_2d//4, N_2d//8]]:
    basis = spin_basis_general(N_2d, make_basis=False, Nup=Np,
                               kxblock=(T_x,0), kyblock=(T_y,0),
                               pxblock=(P_x,0), pyblock=(P_y,0),
                               zblock=(Z,0),
                               )
    basis_op = spin_basis_general(N_2d, make_basis=True, Nup=Np,
                                  kxblock=(T_x,0), kyblock=(T_y,0),
                                  pxblock=(P_x,0), pyblock=(P_y,0),
                                  zblock=(Z,0),
                                  )
    compare(static_list, basis, basis_op)
    print('passed spins')
for k in range(L):
    for q in range(L):
        print("testing k={} -> k+q={}".format(k, (k+q)%L))
        # use standard static list for this.
        # use generators to generate coupling list
        op_list = [["z", [i], np.exp(-2j*np.pi*q*i/L)] for i in range(L)]
        # coupling=[[np.exp(-2j*np.pi*q*i/L),i] for i in range(L)]
        # op_list = [["z",coupling]]
        t = (np.arange(L)+1)%L
        b = spin_basis_general(L)
        b1 = spin_basis_general(L, kblock=(t, k))
        b2 = spin_basis_general(L, kblock=(t, k+q))
        # print(b1)
        # print(b2)
        P1 = b1.get_proj(np.complex128)
        P2 = b2.get_proj(np.complex128)
        v_in = np.random.normal(0, 1, size=b1.Ns) + 1j*np.random.normal(0, 1, size=b1.Ns)
        v_in /= np.linalg.norm(v_in)
        v_in_full = P1.dot(v_in)
        v_out_full = b.inplace_Op(v_in_full, op_list, np.complex128)
###### setting up user-defined symmetry transformations for 2d lattice ######
s = np.arange(N_2d)  # sites [0,1,2,....]
x = s % Lx  # x positions for sites
y = s // Lx  # y positions for sites
T_x = (x + 1) % Lx + Lx * y  # translation along x-direction
T_y = x + Lx * ((y + 1) % Ly)  # translation along y-direction
P_x = x + Lx * (Ly - y - 1)  # reflection about x-axis
P_y = (Lx - x - 1) + Lx * y  # reflection about y-axis
Z = -(s + 1)  # spin inversion
#
###### setting up bases ######
basis_2d = spin_basis_general(N=N_2d, Nup=N_2d // 2, S="1/2", pauli=0,
                              kxblock=(T_x, 0), kyblock=(T_y, 0),
                              pxblock=(P_x, 0), pyblock=(P_y, 0),
                              zblock=(Z, 0))
# basis_2d = spin_basis_general(N=N_2d,Nup=N_2d//2,S="1/2",pauli=0)
#
###### setting up hamiltonian ######
# setting up site-coupling lists
Jzzs = [[J, i, T_x[i]] for i in range(N_2d)] + [[J, i, T_y[i]] for i in range(N_2d)]
Jpms = [[0.5 * J, i, T_x[i]] for i in range(N_2d)] + [[0.5 * J, i, T_y[i]] for i in range(N_2d)]
Jmps = [[0.5 * J, i, T_x[i]] for i in range(N_2d)] + [[0.5 * J, i, T_y[i]] for i in range(N_2d)]
#
def anneal_bath(L, T, gamma=0.01, path="."):
    ti = time.time()
    filename = os.path.join(path, "spin_bath_exact_L_{}_T_{}_gamma_{}.npz".format(L, T, gamma))
    if os.path.isfile(filename):
        print("file exists...exiting run.")
        exit()
    N = 2 * L
    Tx = (np.arange(L) + 1) % L
    Tx = np.hstack((Tx, Tx + L))
    P = np.arange(L)[::-1]
    P = np.hstack((P, P + L))
    print("creating basis")
    basis = spin_basis_general(N, pblk=(P, 0), kblk=(Tx, 0))
    print("L={}, H-space size: {}".format(L, basis.Ns))
    Jzz_list = [[-1, i, (i + 1) % L] for i in range(L)]
    hx_list = [[-1, i] for i in range(L)]
    hop_bath_list = [[-1.0, L + i, L + (i + 1) % L] for i in range(L)]
    hop_bath_list += [[-1.0, L + i, L + (i + 1) % L] for i in range(L)]
    int_2_list = [[1.0, L + i, L + (i + 1) % L] for i in range(L)]
    int_2_list += [[1.0, L + i, L + (i + 2) % L] for i in range(L)]
    int_1_list = [[2.0, L + i] for i in range(L)]
    bath_sys_list = [[gamma, i, i + L] for i in range(L)]
    Jb_list = [[gamma, i, L + i] for i in range(L)]
    A = lambda t: (t / T)**2
    B = lambda t: (1 - t / T)**2
    static = [["+-", hop_bath_list], ["-+", hop_bath_list], ["zz", int_2_list], ["z", int_1_list]]
    dynamic = [
        ["zz", Jzz_list, A, ()],
        ["x", hx_list, B, ()],
        ["+-", bath_sys_list, B, ()],
        ["-+", bath_sys_list, B, ()],
    ]
    kwargs = dict(basis=basis, dtype=np.float64, check_symm=False, check_pcon=False, check_herm=False)
    H = hamiltonian(static, dynamic, **kwargs)
    print("creating hamiltonian")
    kwargs = dict(basis=basis, dtype=np.float64, check_symm=False, check_pcon=False, check_herm=False)
    H = hamiltonian([], dynamic, **kwargs)
    print("solving initial state")
    E0, psi_0 = H.eigsh(k=1, which="SA", time=0)
    psi_0 = psi_0.ravel()
    print("evolving")
    out = np.zeros(psi_0.shape, dtype=np.complex128)
    psi_f = evolve(psi_0, 0, T, H._hamiltonian__omp_SO,
                   f_params=(out, ), solver_name="dop853", atol=1.1e-15, rtol=1.1e-15)
    psi_f /= np.linalg.norm(psi_f)
    print("saving")
    np.savez_compressed(filename, psi=psi_f)
    print("done.... {} sec".format(time.time() - ti))
           for i in range(N_2d)]
# setting up opstr list
static = [["xx", J1_list], ["yy", J1_list], ["zz", J1_list],
          ["xx", J2_list], ["yy", J2_list], ["zz", J2_list]]
# convert static list to format which is easy to use with the basis_general.Op and basis_general.Op_bra_ket methods.
static_formatted = _consolidate_static(static)
#
###### setting up basis object without computing the basis (make_basis=False) ######
basis = spin_basis_general(
    N_2d,
    pauli=0,
    make_basis=False,
    Nup=N_2d // 2,
    kxblock=(T_x, 0),
    kyblock=(T_y, 0),
    pxblock=(P_x, 0),
    pyblock=(P_y, 0),
    pdblock=(P_d, 0),
    zblock=(Z, 0),
    block_order=['zblock', 'pdblock', 'pyblock', 'pxblock', 'kyblock', 'kxblock'],  # momentum symmetry comes last for speed
)
print(basis)  # examine basis: contains a single element because it is not computed, due to the make_basis=False argument above.
print('basis is empty [note argument make_basis=False]')
#
###### define quantum state to compute the energy of using Monte-Carlo sampling ######
#
# auxiliary basis, only needed for probability_amplitude(); not needed in a proper variational ansatz.
N_2d = Lx * Ly  # number of sites
#
###### setting up user-defined symmetry transformations for 2d lattice ######
s = np.arange(N_2d)  # sites [0,1,2,....]
x = s % Lx  # x positions for sites
y = s // Lx  # y positions for sites
T_x = (x + 1) % Lx + Lx * y  # translation along x-direction
T_y = x + Lx * ((y + 1) % Ly)  # translation along y-direction
P_x = x + Lx * (Ly - y - 1)  # reflection about x-axis
P_y = (Lx - x - 1) + Lx * y  # reflection about y-axis
Z = -(s + 1)  # spin inversion
#
###### setting up bases ######
basis_2d = spin_basis_general(N_2d,
                              kxblock=(T_x, 0), kyblock=(T_y, 0),
                              pxblock=(P_x, 0), pyblock=(P_y, 0),
                              zblock=(Z, 0))
#
###### setting up hamiltonian ######
# setting up site-coupling lists
Jzz = [[J, i, T_x[i]] for i in range(N_2d)] + [[J, i, T_y[i]] for i in range(N_2d)]
gx = [[g, i] for i in range(N_2d)]
#
static = [["zz", Jzz], ["x", gx]]
# build hamiltonian
H = hamiltonian(static, [], basis=basis_2d, dtype=np.float64)
# diagonalise H
E = H.eigvalsh()
def makeBasis(N, S1, S2):
    basis1 = spin_basis_general(N=N, S=S1)
    basis2 = spin_basis_general(N=N, S=S2)
    basis = tensor_basis(basis1, basis2)
    return basis
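# A minimal usage sketch (an assumption, not part of the original file): couple the two
# spin species site by site with an Ising-type "z|z" term, using the tensor_basis
# convention that "|" separates operators acting on the first and second factor basis.
if __name__ == "__main__":
    basis = makeBasis(4, "1/2", "1")
    J_list = [[1.0, i, i] for i in range(4)]  # site i of species 1 couples to site i of species 2
    static = [["z|z", J_list]]
    H = hamiltonian(static, [], basis=basis, dtype=np.float64,
                    check_symm=False, check_herm=False, check_pcon=False)
    print(basis.Ns, H.eigvalsh()[:4])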
import numpy as np
import tempfile, os

from quspin.basis import spin_basis_general
from quspin.operators import quantum_operator


def Jr(r, alpha):
    return (-1)**(r + 1) / r**(alpha)


L = 10
alpha = 2.0
t = (np.arange(L) + 1) % L
p = np.arange(L)[::-1]
z = -(np.arange(L) + 1)
basis = spin_basis_general(L, m=0.0, t=(t, 0), p=(p, 0), z=(z, 0), pauli=False)
Jzz_list = [[Jr(r, alpha), i, (i + r) % L] for i in range(L) for r in range(1, L // 2, 1)]
Jxy_list = [[Jr(r, alpha) / 2.0, i, (i + r) % L] for i in range(L) for r in range(1, L // 2, 1)]
ops = dict(Jxy=[[op, Jxy_list] for op in ["+-", "-+"]],
           Jzz=[["zz", Jzz_list]],
           Jd=[np.random.normal(0, 1, size=(basis.Ns, basis.Ns))])
op = quantum_operator(ops, basis=basis, dtype=np.float32,
                      matrix_formats=dict(Jzz="dia", Jxy="csr", Jd="dense"))
with tempfile.TemporaryDirectory() as tmpdirname:
L = 34  # or 36 with 10 OMP/MKL threads

output_str = []

######## BASIS CONSTRUCTION ########
#
# required time scales exponentially with L
p = np.arange(L)[::-1]
t = (np.arange(L) + 1) % L
z = -(np.arange(L) + 1)

ti = time.time()
basis = spin_basis_general(L, S="1/2", m=0, kblock=(t, 0), pblock=(p, 0), zblock=(z, 0))
tf = time.time()
time_basis = tf - ti

basis_str = "\nbasis with {0:d} states took {1:0.2f} secs.\n".format(basis.Ns, time_basis)
output_str.append(basis_str)
print(basis_str)

######## HAMILTONIAN CONSTRUCTION ########
#
# required time scales exponentially with L
# linear speedup is expected from both OMP and MKL
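# A hedged sketch of the step announced above (an assumption, not the original benchmark
# code): time the construction of a nearest-neighbour Heisenberg Hamiltonian in the
# symmetry block built above, mirroring the timing pattern used for the basis.
J_list = [[1.0, i, (i + 1) % L] for i in range(L)]
static = [["zz", J_list], ["+-", J_list], ["-+", J_list]]
no_checks = dict(check_symm=False, check_herm=False, check_pcon=False)

ti = time.time()
H = hamiltonian(static, [], basis=basis, dtype=np.float64, **no_checks)
time_H = time.time() - ti

H_str = "\nHamiltonian construction took {0:0.2f} secs.\n".format(time_H)
output_str.append(H_str)
print(H_str)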
basis_boson = boson_basis_general(N_2d,
                                  make_basis=False,
                                  Nb=N_2d // 4,
                                  sps=2,
                                  **basis_dict)
basis_boson_full = boson_basis_general(N_2d,
                                       make_basis=False,
                                       Nb=N_2d // 4,
                                       sps=2,
                                       )
basis_spin = spin_basis_general(N_2d,
                                pauli=False,
                                make_basis=False,
                                Nup=N_2d // 2,
                                zblock=(Z, 0),
                                **basis_dict)
basis_spin_full = spin_basis_general(N_2d,
                                     pauli=False,
                                     make_basis=False,
                                     Nup=N_2d // 2,
                                     )
basis_fermion = spinless_fermion_basis_general(N_2d,
                                               make_basis=False,
                                               Nf=N_2d // 2,
                                               **basis_dict)
def test(Lx,Ly):
    N = Lx*Ly
    nmax = int(eval("2*1/2"))
    sps = nmax+1
    tr = square_lattice_trans(Lx,Ly)
    basis_dict = {}
    basis_dict_f = {}
    basis_dict_combined = {}
    Nups = range(nmax*N+1)
    for Nup in Nups:
        basis_blocks = []
        basis_blocks_f = []
        pcon_basis = spin_basis_general(N,Nup=Nup,pauli=False)
        pcon_basis_f = spinful_fermion_basis_general(N,Nf=(Nup,N-Nup),double_occupancy=False)
        Ns_block = 0
        for blocks in tr.allowed_blocks_spin_inversion_iter(Nup,sps):
            basis = spin_basis_general(N,Nup=Nup,pauli=False,**blocks)
            Ns_block += basis.Ns
            basis_blocks.append(basis)
        Ns_block_f = 0
        for blocks_f in tr.allowed_blocks_spin_inversion_iter(Nup,sps):  # requires simple symmetry definition
            basis_f = spinful_fermion_basis_general(N,Nf=(Nup,N-Nup),double_occupancy=False,**blocks_f)
            Ns_block_f += basis_f.Ns
            basis_blocks_f.append(basis_f)
        try:
            assert(Ns_block == pcon_basis.Ns)
        except AssertionError:
            print(Nup,Ns_block,pcon_basis.Ns)
            raise AssertionError("reduced blocks don't sum to particle sector.")
        try:
            assert(Ns_block_f == pcon_basis_f.Ns)
        except AssertionError:
            print(Nup,Ns_block_f,pcon_basis_f.Ns)
            raise AssertionError("fermion reduced blocks don't sum to particle sector.")
        try:
            assert(Ns_block == pcon_basis_f.Ns)
        except AssertionError:
            print(Nup,Ns_block_f,pcon_basis_f.Ns)
            raise AssertionError("fermion reduced blocks don't match spin blocks.")
        basis_dict[Nup] = (pcon_basis,basis_blocks)
        basis_dict_f[Nup] = (pcon_basis_f,basis_blocks_f)
        basis_dict_combined[Nup] = (pcon_basis,basis_blocks,pcon_basis_f,basis_blocks_f)
    J = [[1.0,i,tr.T_x[i]] for i in range(N)] + [[1.0,i,tr.T_y[i]] for i in range(N)]
    J_nn_ij = [[-0.25,i,tr.T_x[i]] for i in range(N)] + [[-0.25,i,tr.T_y[i]] for i in range(N)]
    J_nn_ji = [[-0.25,tr.T_x[i],i] for i in range(N)] + [[-0.25,tr.T_y[i],i] for i in range(N)]
    J_nn_ij_p = [[0.25,i,tr.T_x[i]] for i in range(N)] + [[0.25,i,tr.T_y[i]] for i in range(N)]
    J_nn_ji_p = [[0.25,tr.T_x[i],i] for i in range(N)] + [[0.25,tr.T_y[i],i] for i in range(N)]
    J_cccc_ij = [[-1.0,i,tr.T_x[i],tr.T_x[i],i] for i in range(N)] + [[-1.0,i,tr.T_y[i],tr.T_y[i],i] for i in range(N)]
    J_cccc_ji = [[-1.0,tr.T_x[i],i,i,tr.T_x[i]] for i in range(N)] + [[-1.0,tr.T_y[i],i,i,tr.T_y[i]] for i in range(N)]
    static = [["zz",J],["+-",J],["-+",J]]
    static_f = [["nn|",J_nn_ij_p],["|nn",J_nn_ji_p],["n|n",J_nn_ij],["n|n",J_nn_ji],["+-|+-",J_cccc_ij],["+-|+-",J_cccc_ji]]
    E_symm = {}
    E_symm_f = {}
    for Nup,(pcon_basis,basis_blocks,pcon_basis_f,basis_blocks_f) in basis_dict_combined.items():  # loop variable renamed from N to avoid shadowing the site count
        H_pcon = hamiltonian(static,[],basis=pcon_basis,dtype=np.float64)
        H_pcon_f = hamiltonian(static_f,[],basis=pcon_basis_f,dtype=np.float64)
        if H_pcon.Ns>0:
            E_pcon = np.linalg.eigvalsh(H_pcon.todense())
        else:
            E_pcon = np.array([])
        if H_pcon_f.Ns>0:
            E_pcon_f = np.linalg.eigvalsh(H_pcon_f.todense())
        else:
            E_pcon_f = np.array([])
        E_block = []
        E_block_f = []
        for basis, basis_f in zip(basis_blocks,basis_blocks_f):
            H = hamiltonian(static,[],basis=basis,dtype=np.complex128)
            H_f = hamiltonian(static_f,[],basis=basis_f,dtype=np.complex128)
            if H.Ns>0:
                E_block.append(np.linalg.eigvalsh(H.todense()))
            if H_f.Ns>0:
                E_block_f.append(np.linalg.eigvalsh(H_f.todense()))
        E_block = np.hstack(E_block)
        E_block.sort()
        E_block_f = np.hstack(E_block_f)
        E_block_f.sort()
        np.testing.assert_allclose(E_pcon,E_block,atol=1e-13)
        np.testing.assert_allclose(E_pcon_f,E_block_f,atol=1e-13)
        np.testing.assert_allclose(E_pcon,E_pcon_f,atol=1e-13)
        print("passed Nup={} sector".format(Nup))
def test_gen_basis_spin(l_max,S="1/2"):
    L = 6
    kblocks = [None]
    kblocks.extend(range(L))
    pblocks = [None,0,1]
    zblocks = [None,0,1]
    if S=="1/2":
        ops = ["x","y","z","+","-","I"]
    else:
        ops = ["z","+","-","I"]
    sps,s = S_dict[S]
    Nups = [None,int(s*L)]
    t = np.array([(i+1)%L for i in range(L)])
    p = np.array([L-i-1 for i in range(L)])
    z = np.array([-(i+1) for i in range(L)])
    for Nup,kblock,pblock,zblock in product(Nups,kblocks,pblocks,zblocks):
        gen_blocks = {"pauli":False,"S":S}
        basis_blocks = {"pauli":False,"S":S}
        if kblock==0 or kblock==L//2:
            if pblock is not None:
                basis_blocks["pblock"] = (-1)**pblock
                gen_blocks["pblock"] = (p,pblock)
            else:
                basis_blocks["pblock"] = None
                gen_blocks["pblock"] = None
        else:
            basis_blocks["pblock"] = None
            gen_blocks["pblock"] = None
        if zblock is not None:
            basis_blocks["zblock"] = (-1)**zblock
            gen_blocks["zblock"] = (z,zblock)
        else:
            basis_blocks["zblock"] = None
            gen_blocks["zblock"] = None
        if kblock is not None:
            basis_blocks["kblock"] = kblock
            gen_blocks["kblock"] = (t,kblock)
        else:
            basis_blocks["kblock"] = None
            gen_blocks["kblock"] = None
        basis_1d = spin_basis_1d(L,Nup=Nup,**basis_blocks)
        gen_basis = spin_basis_general(L,Nup=Nup,**gen_blocks)
        n = basis_1d._get_norms(np.float64)**2
        n_gen = (gen_basis._n.astype(np.float64))*gen_basis._pers.prod()
        if basis_1d.Ns != gen_basis.Ns:
            print(L,basis_blocks)
            print(basis_1d)
            print(gen_basis)
            raise ValueError("basis size mismatch")
        try:
            np.testing.assert_allclose(basis_1d._basis-gen_basis._basis,0,atol=1e-6)
            np.testing.assert_allclose(n-n_gen,0,atol=1e-6)
        except:
            print(basis_1d._basis)
            print(gen_basis._basis)
            print(n.shape)
            print(n_gen.shape)
            raise Exception
        for l in range(1,l_max+1):
            for i0 in range(0,L-l+1,1):
                indx = range(i0,i0+l,1)
                for opstr in product(*[ops for i in range(l)]):
                    opstr = "".join(list(opstr))
                    printing = dict(basis_blocks)
                    printing["opstr"] = opstr
                    printing["indx"] = indx
                    printing["Nup"] = Nup
                    printing["S"] = S
                    err_msg = "testing: {opstr:} {indx:} S={S:} Nup={Nup:} kblock={kblock:} pblock={pblock:} zblock={zblock:}".format(**printing)
                    check_ME(basis_1d,gen_basis,opstr,indx,np.complex128,err_msg)
def check_gen_basis_hcb(S="1/2"):
    L = 6
    kblocks = [None]
    kblocks.extend(range(L))
    pblocks = [None,0,1]
    zblocks = [None,0,1]
    sps,s = S_dict[S]
    Nups = [None,int(s*L)]
    t = np.array([(i+1)%L for i in range(L)])
    p = np.array([L-i-1 for i in range(L)])
    z = np.array([-(i+1) for i in range(L)])
    for Nup,kblock,pblock,zblock in product(Nups,kblocks,pblocks,zblocks):
        gen_blocks = {"S":S,"pauli":False}
        basis_blocks = {"S":S,"pauli":False}
        dtype = np.complex128
        if kblock==0 or kblock==L//2:
            if pblock is not None:
                dtype = np.float64
                basis_blocks["pblock"] = (-1)**pblock
                gen_blocks["pblock"] = (p,pblock)
            else:
                basis_blocks["pblock"] = None
                gen_blocks["pblock"] = None
        else:
            basis_blocks["pblock"] = None
            gen_blocks["pblock"] = None
        if zblock is not None:
            basis_blocks["zblock"] = (-1)**zblock
            gen_blocks["zblock"] = (z,zblock)
        else:
            basis_blocks["zblock"] = None
            gen_blocks["zblock"] = None
        if kblock is not None:
            basis_blocks["kblock"] = kblock
            gen_blocks["kblock"] = (t,kblock)
        else:
            basis_blocks["kblock"] = None
            gen_blocks["kblock"] = None
        print("checking S={S:} Nup={Nup:} kblock={kblock:} pblock={pblock:} zblock={zblock:}".format(Nup=Nup,**basis_blocks))
        basis_1d = spin_basis_1d(L,Nup=Nup,**basis_blocks)
        gen_basis = spin_basis_general(L,Nup=Nup,**gen_blocks)
        P1 = basis_1d.get_proj(dtype)
        P2 = gen_basis.get_proj(dtype)
        np.testing.assert_allclose((P1-P2).data,0,atol=1e-14,err_msg="failed projector")
        v = np.random.ranf(size=(basis_1d.Ns,)).astype(dtype)
        vs = np.random.ranf(size=(basis_1d.Ns,100)).astype(dtype)
        v1 = basis_1d.get_vec(v,sparse=False)
        v2 = gen_basis.get_vec(v,sparse=False)
        np.testing.assert_allclose((v1-v2),0,atol=1e-14,err_msg="failed single vector dense")
        v1 = basis_1d.get_vec(v,sparse=True)
        v2 = gen_basis.get_vec(v,sparse=True)
        np.testing.assert_allclose((v1-v2).data,0,atol=1e-14,err_msg="failed single vector sparse")
        vs1 = basis_1d.get_vec(vs,sparse=False)
        vs2 = gen_basis.get_vec(vs,sparse=False)
        np.testing.assert_allclose((vs1-vs2),0,atol=1e-14,err_msg="failed multi vector dense")
        vs1 = basis_1d.get_vec(vs,sparse=True)
        vs2 = gen_basis.get_vec(vs,sparse=True)
        np.testing.assert_allclose((vs1-vs2).data,0,atol=1e-14,err_msg="failed multi vector sparse")
                                        Nb=N_2d//4, sps=2,
                                        kxblock=(T_x,0), kyblock=(T_y,0),
                                        rblock=(R,0),
                                        pxblock=(P_x,0), pyblock=(P_y,0),
                                        # zblock=(Z,0)
                                        )
basis_boson_full = boson_basis_general(N_2d,
                                       make_basis=True,
                                       Nb=N_2d//4, sps=2,
                                       )
basis_spin = spin_basis_general(N_2d,
                                pauli=False,
                                make_basis=False,
                                Nup=N_2d//2,
                                kxblock=(T_x,0), kyblock=(T_y,0),
                                rblock=(R,0),
                                pxblock=(P_x,0), pyblock=(P_y,0),
                                zblock=(Z,0),
                                )
basis_spin_full = spin_basis_general(N_2d,
                                     pauli=False,
                                     make_basis=True,
                                     Nup=N_2d//2,
                                     )
basis_fermion = spinless_fermion_basis_general(N_2d,
                                               make_basis=False,
                                               Nf=N_2d//2,
                                               kxblock=(T_x,0), kyblock=(T_y,0),
                                               rblock=(R,0),
                                               pxblock=(P_x,0), pyblock=(P_y,0),
                                               )
    else:
        np.testing.assert_allclose(pm_1, ratio_12 * pm_2, atol=1e-13)
        np.testing.assert_allclose(mp_1, ratio_12 * mp_2, atol=1e-13)
        np.testing.assert_allclose(xx_1, ratio_12 * xx_2, atol=1e-13)
        np.testing.assert_allclose(yy_1, ratio_12 * yy_2, atol=1e-13)
        np.testing.assert_allclose(zz_1, ratio_12 * zz_2, atol=1e-13)


basis_1d_S = spin_basis_1d(L=L, pauli=0)
basis_1d_pauli_1 = spin_basis_1d(L=L)
basis_1d_pauli_2 = spin_basis_1d(L=L, pauli=-1)

spin_basis_general_S = spin_basis_general(N=L, pauli=0)
spin_basis_general_pauli_1 = spin_basis_general(N=L)
spin_basis_general_pauli_2 = spin_basis_general(N=L, pauli=-1)

bases = [(basis_1d_S, basis_1d_pauli_1, basis_1d_pauli_2),
         (spin_basis_general_S, spin_basis_general_pauli_1, spin_basis_general_pauli_2)]

no_checks = dict(check_herm=False, check_pcon=False, check_symm=False, dtype=np.float64)

for basis_S, basis_pauli_1, basis_pauli_2 in bases:
    # check Op and Op_bra_ket functions