def process_op(old_op: Op):
    old_ops, _ = old_op.split_elementary(dof_to_siteidx)
    new_ops = []
    for elem_op in old_ops:
        # move all sigma_z to the start of the operator
        # and cancel as many as possible
        n_sigma_z = elem_op.split_symbol.count("sigma_z")
        n_non_sigma_z = 0
        n_permute = 0
        for simple_elem_op in elem_op.split_symbol:
            if simple_elem_op != "sigma_z":
                n_non_sigma_z += 1
            else:
                n_permute += n_non_sigma_z
        # remove as many "sigma_z" as possible
        new_symbol = [s for s in elem_op.split_symbol if s != "sigma_z"]
        if n_sigma_z % 2 == 1:
            new_symbol.insert(0, "sigma_z")
        # this op is identity, discard it
        if not new_symbol:
            continue
        new_qn = [qn_dict[s] for s in new_symbol]
        new_dof_name = elem_op.dofs[0]
        new_ops.append(Op(" ".join(new_symbol), new_dof_name, (-1) ** n_permute, new_qn))
    return Op.product(new_ops)

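# A small worked check (not part of the original source). `process_op` reads
# `dof_to_siteidx` and `qn_dict` from its enclosing scope; the two dictionaries
# below mirror the ones defined inside `qc_model` further down, restricted to a
# single spin orbital. Moving one sigma_z past one sigma_+ should pick up a
# factor of -1 because {sigma_z, sigma_+} = 0.
dof_to_siteidx = {0: 0}
qn_dict = {"sigma_+": -1, "sigma_-": 1, "sigma_z": 0}

op = Op("sigma_+ sigma_z", [0, 0])
simplified = process_op(op)
# expected: the symbol becomes "sigma_z sigma_+" and the factor becomes -1
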
def intersite(cls, model: HolsteinModel, e_opera: dict, ph_opera: dict, scale: Quantity = Quantity(1.)):
    r"""
    construct the inter-site MPO

    Parameters
    ----------
    model : HolsteinModel
        the molecular information
    e_opera : dict
        the electronic operators. {imol: operator}, such as {1: "a", 3: r"a^\dagger"}
    ph_opera : dict
        the vibrational operators. {(imol, iph): operator}, such as {(0, 5): "b"}
    scale : Quantity
        scalar to scale the MPO

    Note
    ----
    the operator index starts from 0, 1, 2, ...
    """
    ops = []
    for e_key, e_op in e_opera.items():
        ops.append(Op(e_op, e_key))
    for v_key, v_op in ph_opera.items():
        ops.append(Op(v_op, v_key))
    op = scale.as_au() * Op.product(ops)
    return cls(model, op)

def _terms_to_table(model: Model, terms: List[Op], const: float):
    r"""
    construct a general operator table
    according to the site ordering of `model` (model.dof_to_siteidx)
    """
    table = []
    factor_list = []

    dummy_table_entry = [Op.identity()] * model.nsite
    for op in terms:
        elem_ops, factor = op.split_elementary(model.dof_to_siteidx)
        table_entry = dummy_table_entry.copy()
        for elem_op in elem_ops:
            # `split_elementary` ensures that every symbol in `elem_op` acts on the same site
            site_idx = model.dof_to_siteidx[elem_op.dofs[0]]
            table_entry[site_idx] = elem_op
        table.append(table_entry)
        factor_list.append(factor)
    # const
    if const != 0:
        table_entry = dummy_table_entry.copy()
        factor_list.append(const)
        table.append(table_entry)

    factor_list = np.array(factor_list)
    logger.debug(f"# of operator terms: {len(table)}")

    return table, factor_list

def check_reduced_density_matrix(basis):
    model = Model(basis, [])
    mps = Mps.random(model, 1, 20)
    rdm = mps.calc_edof_rdm().real
    assert np.allclose(np.diag(rdm), mps.e_occupations)

    # only test a sample. Should be enough.
    mpo = Mpo(model, Op(r"a^\dagger a", [0, 3]))
    assert rdm[-1][0] == pytest.approx(mps.expectation(mpo))

def x_square_average(model: Model):
    """
    <x^2> of vibrational DoF
    """
    assert isinstance(model, Model)
    return {
        r"x^2": {
            "x": [Mpo(model, Op("x^2", v_dof)) for v_dof in model.v_dofs]
        }
    }

def test_ft():
    model = get_model()
    mpo = Mpo(model)
    impdm = MpDm.max_entangled_gs(model)
    impdm.compress_config = CompressConfig(threshold=1e-6)
    temperature = Quantity(3)
    evolve_config = EvolveConfig(adaptive=True, guess_dt=-0.001j)
    tp = ThermalProp(impdm, mpo, evolve_config=evolve_config)
    tp.evolve(nsteps=1, evolve_time=temperature.to_beta() / 2j)
    mpdm = tp.latest_mps
    mpdm = Mpo(model, Op("sigma_x", "spin")).contract(mpdm)
    mpdm.evolve_config = EvolveConfig(adaptive=True, guess_dt=0.1)

    time_series = [0]
    sigma_z_oper = Mpo(model, Op("sigma_z", "spin"))
    spin = [mpdm.expectation(sigma_z_oper)]

    for i in range(29):
        dt = mpdm.evolve_config.guess_dt
        mpdm = mpdm.evolve(mpo, evolve_dt=dt)
        time_series.append(time_series[-1] + dt)
        spin.append(mpdm.expectation(sigma_z_oper))

    qutip_res = get_qutip_ft(model, temperature, time_series)
    assert np.allclose(qutip_res, spin, atol=1e-3)

def onsite(cls, model: Model, opera, dipole=False, dof_set=None):
    if dof_set is None:
        if model.n_edofs == 0:
            raise ValueError("No electronic DoF present in the model.")
        dof_set = model.e_dofs
    ops = []
    for idx in dof_set:
        if dipole:
            factor = model.dipole[idx]
        else:
            factor = 1.
        ops.append(Op(opera, idx, factor))
    return cls(model, ops)

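# A short usage sketch (not part of the original source). It assumes `onsite`
# is the Mpo classmethod its `cls` argument suggests, and that the top-level
# import below matches the actual package layout.
from renormalizer import BasisSimpleElectron, Model, Mpo

basis = [BasisSimpleElectron(0), BasisSimpleElectron(1)]
model = Model(basis, [])
# a^\dagger a summed over all electronic DoFs, i.e. the total occupation operator
number_mpo = Mpo.onsite(model, r"a^\dagger a")
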
def test_zt():
    model = get_model()

    mps = Mps.ground_state(model, False)
    mps.compress_config = CompressConfig(threshold=1e-6)
    mps.evolve_config = EvolveConfig(adaptive=True, guess_dt=0.1)
    mps.use_dummy_qn = True
    mpo = Mpo(model)

    time_series = [0]
    spin = [1]
    sigma_z_oper = Mpo(model, Op("sigma_z", "spin"))
    for i in range(30):
        dt = mps.evolve_config.guess_dt
        mps = mps.evolve(mpo, evolve_dt=dt)
        time_series.append(time_series[-1] + dt)
        spin.append(mps.expectation(sigma_z_oper))

    qutip_res = get_qutip_zt(model, time_series)
    assert np.allclose(qutip_res, spin, atol=1e-3)

def ph_onsite(cls, model: HolsteinModel, opera: str, mol_idx: int, ph_idx=0):
    assert opera in ["b", r"b^\dagger", r"b^\dagger b"]
    if not isinstance(model, HolsteinModel):
        raise TypeError("ph_onsite only supports HolsteinModel")
    return cls(model, Op(opera, (mol_idx, ph_idx)))

def symbolic_mpo(table, factor, algo="Hopcroft-Karp"):
    r"""
    A general compact (symbolic) MPO construction routine

    Args:

    table: an operator table with shape (operator nterm, nsite).
        Each entry contains the elementary operator on each site.
    factor (np.ndarray): the prefactor vector (dim: operator nterm)
    algo: the algorithm used to select local ops, "Hopcroft-Karp" (default) or "Hungarian".
        Both are globally optimal and have only a minor performance difference.

    Note:
    ops with the same op.symbol must have the same op.qn and op.factor

    Return:
    mpo: symbolic mpo
    mpoqn: quantum number
    qntot: total quantum number of the operator
    qnidx: the index of the qn

    The idea:

    the index of the primary ops is {0: "I", 1: "a", 2: r"a^\dagger"}

    for example: H = 2.0 * a_1 a_2^\dagger + 3.0 * a_2^\dagger a_3 + 4.0 * a_1^\dagger a_3

    The column names are the site indices, with 0 and 4 imaginary (see the note below),
    and the content of the table is the index of the primary operators.

                        s0   s1   s2   s3   s4   factor
    a_1 a_2^\dagger     0    1    2    0    0    2.0
    a_2^\dagger a_3     0    0    2    1    0    3.0
    a_1^\dagger a_3     0    2    0    1    0    4.0

    for convenience, the first and last columns mean that the operator to the left
    and right of the system is I

    cut the string to construct the row (left) and column (right) operators and
    find the duplicated/independent terms

                        s0   s1 | s2   s3   s4   factor
    a_1 a_2^\dagger     0    1  | 2    0    0    2.0
    a_2^\dagger a_3     0    0  | 2    1    0    3.0
    a_1^\dagger a_3     0    2  | 0    1    0    4.0

    The content of the table below means matrix elements, with the basis explained in the notes.
    In the matrix elements, 1 means combination and 0 means no combination.

            (2,0,0) (2,1,0) (0,1,0)  -> right side of the above table
    (0,1)      1       0       0
    (0,0)      0       1       0
    (0,2)      0       0       1
      |
      v
    left side of the above table

    In this case all operators are independent, so the content of the matrix is diagonal

    then select the terms and rearrange the table.
    The selection rule is to find the minimal number of rows + cols that can eliminate the matrix

                        s1   s2 | s3   s4   factor
    a_1 a_2^\dagger     0'   2  | 0    0    2.0
    a_2^\dagger a_3     1'   2  | 1    0    3.0
    a_1^\dagger a_3     2'   0  | 1    0    4.0

    0'/1'/2' are three new operators (could be non-elementary)
    The local mpo is the transformation matrix between (0,0,0) and 0',1',2'.
    In this case, the local mpo is simply (1, 0, 2)

    cut the string and find the duplicated/independent terms

            (0,0)  (1,0)
    (0',2)    1      0
    (1',2)    0      1
    (2',0)    0      1

    then select the terms and rearrange the table.
    Apparently choosing the (1,0) column and constructing the complementary operator
    (1',2) + (2',0) is better:
    0'' = 3.0 * (1', 2) + 4.0 * (2', 0)

                                                     s2    s3 | s4   factor
    (4.0 * a_1^\dagger + 3.0 * a_2^\dagger) a_3      0''   1  | 0    1.0
    a_1 a_2^\dagger                                  1''   0  | 0    2.0

    0''/1'' are another two new operators (non-elementary)
    The local mpo is the transformation matrix between 0',1',2' and 0'',1''

             (0)
    (0'',1)   1
    (1'',0)   1

    The local mpo is the transformation matrix between 0'',1'' and 0'''
    """
    logger.debug(f"symbolic mpo algorithm: {algo}")

    # use np.uint32, np.uint16 to save memory
    max_uint32 = np.iinfo(np.uint32).max
    max_uint16 = np.iinfo(np.uint16).max

    nsite = len(table[0])
    logger.debug(f"Input operator terms: {len(table)}")

    # Simplest case. Cut to the chase
    if len(table) == 1:
        # The first layer: number of sites. The 2nd and 3rd layers: in and out virtual bond
        # the 4th layer: operator sums
        mpo: List[List[List[List[Op]]]] = []
        mpoqn = [[0]]
        for op in table[0]:
            mpo.append([[[op]]])
            qn = mpoqn[-1][0] + op.qn
            mpoqn.append([qn])
        old_op = mpo[-1][0][0][0]
        mpo[-1][0][0][0] = Op(old_op.symbol, old_op.dofs, old_op.factor * factor[0], old_op.qn_list)
        qntot = qn
        mpoqn[-1] = [0]
        qnidx = len(mpo) - 1
        return mpo, mpoqn, qntot, qnidx

    # translate the symbolic operator table into an easy-to-manipulate numpy array:
    # extract the op symbol, qn and factor to numpy arrays.
    # np.array can't handle a tuple as an array element, so use a dict instead
    def op_to_dict(op: Op):
        return {0: op.symbol, 1: tuple(op.dofs)}

    symbol_table = np.array([[op_to_dict(x) for x in ta] for ta in table])
    qn_table = np.array([[x.qn for x in ta] for ta in table])
    factor_table = np.array([[x.factor for x in ta] for ta in table])

    new_table = np.zeros((len(table), nsite), dtype=np.uint16)

    # Construct a mapping from an easy-to-manipulate integer to the actual Op
    primary_ops = {}
    # unique operators with DoF names taken into consideration
    # The inclusion of DoF names is necessary for a multi-dof basis.
    unique_op: Set[Op] = set(np.array(table).ravel())
    # check that the indices of the different operators can be represented with np.uint16
    assert len(unique_op) < max_uint16

    for idx, op in enumerate(unique_op):
        coord = np.nonzero(symbol_table == op_to_dict(op))
        # check that ops with the same symbol have the same factor and qn
        assert np.unique(qn_table[coord]).size == 1
        assert np.all(factor_table[coord] == factor_table[coord][0])
        new_table[coord] = idx
        primary_ops[idx] = table[coord[0][0]][coord[1][0]]

    del symbol_table, factor_table, qn_table, unique_op

    # combine the same terms but with different factors (add them together)
    unique_term, unique_inverse = np.unique(new_table, axis=0, return_inverse=True)
    # it is efficient to vectorize the operation that moves the rows and cols
    # and sums them together
    coord = np.array([[newidx, oldidx] for oldidx, newidx in enumerate(unique_inverse)])
    mask = scipy.sparse.csr_matrix((np.ones(len(coord)), (coord[:, 0], coord[:, 1])))
    factor = mask.dot(factor)

    # add the first and last columns for convenience
    ta = np.zeros((unique_term.shape[0], 1), dtype=np.uint16)
    table = np.concatenate((ta, unique_term, ta), axis=1)
    logger.debug(f"After combination of the same terms: {table.shape[0]}")
    # check that the index of the interaction can be represented with np.uint32
    assert table.shape[0] < max_uint32

    del unique_term, unique_inverse

    mpo = []
    mpoqn = [[0]]

    # The `Op` class is transformed to a light-weight named tuple
    # for better performance
    OpTuple = namedtuple("OpTuple", ["symbol", "qn", "factor"])

    # 0 represents the identity symbol. Identity might not be present
    # in `primary_ops` but the algorithm still works.
    in_ops = [[OpTuple([0], qn=0, factor=1)]]

    for isite in range(nsite):
        # split the table into the row and col parts
        term_row, row_unique_inverse = np.unique(table[:, :2], axis=0, return_inverse=True)
        term_col, col_unique_inverse = np.unique(table[:, 2:], axis=0, return_inverse=True)

        # get the non-redundant ops
        # the +1 trick is to use the csr sparse matrix format
        non_red = scipy.sparse.diags(np.arange(1, table.shape[0] + 1), format="csr", dtype=np.uint32)
        coord = np.array([[newidx, oldidx] for oldidx, newidx in enumerate(row_unique_inverse)])
        mask = scipy.sparse.csr_matrix((np.ones(len(coord), dtype=np.uint32), (coord[:, 0], coord[:, 1])))
        non_red = mask.dot(non_red)
        coord = np.array([[oldidx, newidx] for oldidx, newidx in enumerate(col_unique_inverse)])
        mask = scipy.sparse.csr_matrix((np.ones(len(coord), dtype=np.uint32), (coord[:, 0], coord[:, 1])))
        non_red = non_red.dot(mask)
        # using a sparse matrix to represent non_red is a little less efficient than a
        # dense matrix, but saves a lot of memory when the number of terms is huge
        # logger.info(f"isite: {isite}, bipartite graph size: {non_red.shape}")

        # select the reserved ops
        out_ops = []
        new_table = []
        new_factor = []

        bigraph = []
        if non_red.shape[0] < non_red.shape[1]:
            for i in range(non_red.shape[0]):
                bigraph.append(non_red.indices[non_red.indptr[i]:non_red.indptr[i + 1]])
            rowbool, colbool = bipartite_vertex_cover(bigraph, algo=algo)
        else:
            non_red_csc = non_red.tocsc()
            for i in range(non_red.shape[1]):
                bigraph.append(non_red_csc.indices[non_red_csc.indptr[i]:non_red_csc.indptr[i + 1]])
            colbool, rowbool = bipartite_vertex_cover(bigraph, algo=algo)

        row_select = np.nonzero(rowbool)[0]
        col_select = np.nonzero(colbool)[0]
        if len(row_select) > 0:
            assert np.amax(row_select) < non_red.shape[0]
        if len(col_select) > 0:
            assert np.amax(col_select) < non_red.shape[1]

        for row_idx in row_select:
            # construct out_op
            # dealing with a row (left side of the table). One row corresponds to multiple cols.
            # Produce one out operator and multiple new_table entries
            symbol = term_row[row_idx]
            qn = in_ops[term_row[row_idx][0]][0].qn + primary_ops[term_row[row_idx][1]].qn
            out_op = OpTuple(symbol, qn, factor=1.0)
            out_ops.append([out_op])

            col_link = non_red.indices[non_red.indptr[row_idx]:non_red.indptr[row_idx + 1]]
            stack = np.array([len(out_ops) - 1] * len(col_link), dtype=np.uint16).reshape(-1, 1)
            new_table.append(np.hstack((stack, term_col[col_link])))
            new_factor.append(factor[non_red[row_idx, col_link].toarray() - 1])

            non_red.data[non_red.indptr[row_idx]:non_red.indptr[row_idx + 1]] = 0
        non_red.eliminate_zeros()

        nonzero_row_idx, nonzero_col_idx = non_red.nonzero()
        for col_idx in col_select:
            out_ops.append([])
            # construct a complementary operator
            # dealing with a column (right side of the table). One col corresponds to multiple rows.
            # Produce multiple out operators and one new_table entry
            non_red_one_col = non_red[:, col_idx].toarray().flatten()
            for i in nonzero_row_idx[np.nonzero(nonzero_col_idx == col_idx)[0]]:
                symbol = term_row[i]
                qn = in_ops[term_row[i][0]][0].qn + primary_ops[term_row[i][1]].qn
                out_op = OpTuple(symbol, qn, factor=factor[non_red_one_col[i] - 1])
                out_ops[-1].append(out_op)

            new_table.append(np.array([len(out_ops) - 1] + list(term_col[col_idx]), dtype=np.uint16).reshape(1, -1))
            new_factor.append(1.0)

            # it is not necessary to remove the nonzero elements of the column
            # non_red[:, col_idx] = 0
            # non_red.eliminate_zeros()

        # translate the numpy array back to a symbolic mpo
        mo = [[[] for o in range(len(out_ops))] for i in range(len(in_ops))]
        moqn = []

        for iop, out_op in enumerate(out_ops):
            for composed_op in out_op:
                in_idx = composed_op.symbol[0]
                op = primary_ops[composed_op.symbol[1]]
                if isite != nsite - 1:
                    factor = composed_op.factor
                else:
                    factor = composed_op.factor * new_factor[0]
                mo[in_idx][iop].append(factor * op)
            moqn.append(out_op[0].qn)

        mpo.append(mo)
        mpoqn.append(moqn)
        # reconstruct the table in terms of the new operators
        table = np.concatenate(new_table)
        # check that the number of incoming operators can be represented as np.uint16
        assert len(out_ops) <= max_uint16
        factor = np.concatenate(new_factor, axis=None)
        # debug
        # logger.debug(f"in_ops: {in_ops}")
        # logger.debug(f"out_ops: {out_ops}")
        # logger.debug(f"new_factor: {new_factor}")

        in_ops = out_ops

    qntot = mpoqn[-1][0]
    mpoqn[-1] = [0]
    qnidx = len(mpo) - 1

    return mpo, mpoqn, qntot, qnidx

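# A minimal driver sketch (not part of the original source) that feeds the
# docstring's example
#     H = 2.0 a_1 a_2^\dagger + 3.0 a_2^\dagger a_3 + 4.0 a_1^\dagger a_3
# to `symbolic_mpo`. The table is built by hand in the same layout that
# `_terms_to_table` produces; the quantum numbers (+1/-1) are illustrative
# choices and the import path is assumed.
import numpy as np
from renormalizer import Op

terms = [
    (2.0, {1: Op("a", 1, qn=-1), 2: Op(r"a^\dagger", 2, qn=1)}),
    (3.0, {2: Op(r"a^\dagger", 2, qn=1), 3: Op("a", 3, qn=-1)}),
    (4.0, {1: Op(r"a^\dagger", 1, qn=1), 3: Op("a", 3, qn=-1)}),
]
nsite = 3

table, factor = [], []
for f, site_ops in terms:
    # identity on every site, then overwrite the sites that carry an operator
    entry = [Op.identity()] * nsite
    for dof, op in site_ops.items():
        entry[dof - 1] = op  # dofs 1..3 map to site indices 0..2
    table.append(entry)
    factor.append(f)

mpo, mpoqn, qntot, qnidx = symbolic_mpo(table, np.array(factor))
# len(mpo) == nsite; each mpo[i] is the symbolic local matrix of the MPO
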
def qc_model(h1e, h2e):
    """
    Ab initio electronic Hamiltonian in spin-orbitals
    h1e: sh above
    h2e: aseri above

    returns basis and ham_terms; the spin-orbital DoFs are ordered
    according to the orbital index in sh and aseri
    """
    # ------------------------------------------------------------------------
    # Jordan-Wigner transformation maps the fermion problem into a spin problem
    #
    # |0> => |alpha> and |1> => |beta>:
    #
    # a_j   => Prod_{l=0}^{j-1}(sigma_z[l]) * sigma_+[j]
    # a_j^+ => Prod_{l=0}^{j-1}(sigma_z[l]) * sigma_-[j]
    #
    # j starts from 0, as in the computer-science convention, to be consistent
    # with the following code.
    # ------------------------------------------------------------------------

    norbs = h1e.shape[0]
    logger.info(f"spin norbs: {norbs}")
    assert np.all(np.array(h1e.shape) == norbs)
    assert np.all(np.array(h2e.shape) == norbs)

    # construct electronic creation/annihilation operators by the Jordan-Wigner transformation
    a_ops = []
    a_dag_ops = []
    for j in range(norbs):
        # qn for each op will be processed in `process_op`
        sigma_z_list = [Op("sigma_z", l) for l in range(j)]
        a_ops.append(Op.product(sigma_z_list + [Op("sigma_+", j)]))
        a_dag_ops.append(Op.product(sigma_z_list + [Op("sigma_-", j)]))

    ham_terms = []

    # helper function to process operators.
    # Remove "sigma_z sigma_z". Use {sigma_z, sigma_+} = 0
    # and {sigma_z, sigma_-} = 0 to simplify operators,
    # and set the quantum number
    dof_to_siteidx = dict(zip(range(norbs), range(norbs)))
    qn_dict = {"sigma_+": -1, "sigma_-": 1, "sigma_z": 0}

    def process_op(old_op: Op):
        old_ops, _ = old_op.split_elementary(dof_to_siteidx)
        new_ops = []
        for elem_op in old_ops:
            # move all sigma_z to the start of the operator
            # and cancel as many as possible
            n_sigma_z = elem_op.split_symbol.count("sigma_z")
            n_non_sigma_z = 0
            n_permute = 0
            for simple_elem_op in elem_op.split_symbol:
                if simple_elem_op != "sigma_z":
                    n_non_sigma_z += 1
                else:
                    n_permute += n_non_sigma_z
            # remove as many "sigma_z" as possible
            new_symbol = [s for s in elem_op.split_symbol if s != "sigma_z"]
            if n_sigma_z % 2 == 1:
                new_symbol.insert(0, "sigma_z")
            # this op is identity, discard it
            if not new_symbol:
                continue
            new_qn = [qn_dict[s] for s in new_symbol]
            new_dof_name = elem_op.dofs[0]
            new_ops.append(Op(" ".join(new_symbol), new_dof_name, (-1) ** n_permute, new_qn))
        return Op.product(new_ops)

    # 1-e terms
    for p, q in itertools.product(range(norbs), repeat=2):
        op = process_op(a_dag_ops[p] * a_ops[q])
        ham_terms.append(op * h1e[p, q])

    # 2-e terms
    for q, s in itertools.product(range(norbs), repeat=2):
        for p, r in itertools.product(range(q), range(s)):
            op = process_op(Op.product([a_dag_ops[p], a_dag_ops[q], a_ops[r], a_ops[s]]))
            ham_terms.append(op * h2e[p, q, r, s])

    basis = [BasisHalfSpin(iorb, sigmaqn=[0, 1]) for iorb in range(norbs)]

    return basis, ham_terms

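# A minimal usage sketch (not part of the original source). The integrals are
# made-up toy numbers, not a real molecule, and the top-level import path is
# assumed; adjust it to the actual package layout.
import numpy as np
from renormalizer import Model, Mpo

norbs = 2
h1e = np.array([[-1.0, 0.1],
                [0.1, -0.5]])
h2e = np.zeros((norbs,) * 4)
h2e[0, 1, 0, 1] = 0.3  # a single toy two-electron integral

basis, ham_terms = qc_model(h1e, h2e)
model = Model(basis, ham_terms)
mpo = Mpo(model)  # MPO of the Jordan-Wigner transformed Hamiltonian
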
def construct_vibronic_model(multi_e, dvr):
    r"""
    Bi-linear vibronic coupling model for pyrazine, 4 modes
    See: Raab, Worth, Meyer, Cederbaum. J. Chem. Phys. 110 (1999) 936
    The parameters are from the Heidelberg MCTDH package, pyr4+.op
    """
    # frequencies
    w10a = 0.1139 * ev2au
    w6a = 0.0739 * ev2au
    w1 = 0.1258 * ev2au
    w9a = 0.1525 * ev2au

    # energy-gap
    delta = 0.42300 * ev2au

    # linear, on-diagonal coupling coefficients
    # H(1,1)
    _6a_s1_s1 = 0.09806 * ev2au
    _1_s1_s1 = 0.05033 * ev2au
    _9a_s1_s1 = 0.14521 * ev2au
    # H(2,2)
    _6a_s2_s2 = -0.13545 * ev2au
    _1_s2_s2 = 0.17100 * ev2au
    _9a_s2_s2 = 0.03746 * ev2au

    # quadratic, on-diagonal coupling coefficients
    # H(1,1)
    _10a_10a_s1_s1 = -0.01159 * ev2au
    _6a_6a_s1_s1 = 0.00000 * ev2au
    _1_1_s1_s1 = 0.00000 * ev2au
    _9a_9a_s1_s1 = 0.00000 * ev2au
    # H(2,2)
    _10a_10a_s2_s2 = -0.01159 * ev2au
    _6a_6a_s2_s2 = 0.00000 * ev2au
    _1_1_s2_s2 = 0.00000 * ev2au
    _9a_9a_s2_s2 = 0.00000 * ev2au

    # bilinear, on-diagonal coupling coefficients
    # H(1,1)
    _6a_1_s1_s1 = 0.00108 * ev2au
    _1_9a_s1_s1 = -0.00474 * ev2au
    _6a_9a_s1_s1 = 0.00204 * ev2au
    # H(2,2)
    _6a_1_s2_s2 = -0.00298 * ev2au
    _1_9a_s2_s2 = -0.00155 * ev2au
    _6a_9a_s2_s2 = 0.00189 * ev2au

    # linear, off-diagonal coupling coefficients
    _10a_s1_s2 = 0.20804 * ev2au

    # bilinear, off-diagonal coupling coefficients
    # H(1,2) and H(2,1)
    _1_10a_s1_s2 = 0.00553 * ev2au
    _6a_10a_s1_s2 = 0.01000 * ev2au
    _9a_10a_s1_s2 = 0.00126 * ev2au

    ham_terms = []
    e_list = ["s1", "s2"]
    v_list = ["10a", "6a", "9a", "1"]

    for (e_isymbol, e_jsymbol) in product(e_list, repeat=2):
        e_op = Op(r"a^\dagger a", [e_isymbol, e_jsymbol])
        for (v_isymbol, v_jsymbol) in product(v_list, repeat=2):
            # linear
            if v_isymbol == v_jsymbol:
                # if one of the permutations is defined, then the `e_idx_tuple` term
                # should be defined, as required by a Hermitian Hamiltonian
                for eterm1, eterm2 in permut([e_isymbol, e_jsymbol], 2):
                    factor = locals().get(f"_{v_isymbol}_{eterm1}_{eterm2}")
                    if factor is not None:
                        factor *= np.sqrt(eval(f"w{v_isymbol}"))
                        ham_terms.append(e_op * Op("x", v_isymbol) * factor)
                        logger.debug(f"term: {v_isymbol}_{e_isymbol}_{e_jsymbol}")
                        break
                else:
                    logger.debug(f"no term: {v_isymbol}_{e_isymbol}_{e_jsymbol}")

            # quadratic
            # use product to guarantee `break` breaks the whole loop
            it = product(permut([v_isymbol, v_jsymbol], 2), permut([e_isymbol, e_jsymbol], 2))
            for (vterm1, vterm2), (eterm1, eterm2) in it:
                factor = locals().get(f"_{vterm1}_{vterm2}_{eterm1}_{eterm2}")
                if factor is not None:
                    factor *= np.sqrt(eval(f"w{v_isymbol}") * eval(f"w{v_jsymbol}"))
                    if vterm1 == vterm2:
                        v_op = Op("x^2", vterm1)
                    else:
                        v_op = Op("x", vterm1) * Op("x", vterm2)
                    ham_terms.append(e_op * v_op * factor)
                    logger.debug(f"term: {v_isymbol}_{v_jsymbol}_{e_isymbol}_{e_jsymbol}")
                    break
            else:
                logger.debug(f"no term: {v_isymbol}_{v_jsymbol}_{e_isymbol}_{e_jsymbol}")

    # electronic coupling
    ham_terms.append(Op(r"a^\dagger a", "s1", -delta, [0, 0]))
    ham_terms.append(Op(r"a^\dagger a", "s2", delta, [0, 0]))

    # vibrational kinetic and potential
    for v_isymbol in v_list:
        ham_terms.extend([
            Op("p^2", v_isymbol, 0.5),
            Op("x^2", v_isymbol, 0.5 * eval("w" + v_isymbol) ** 2)
        ])

    for term in ham_terms:
        logger.info(term)

    basis = []
    if not multi_e:
        for e_isymbol in e_list:
            basis.append(ba.BasisSimpleElectron(e_isymbol))
    else:
        basis.append(ba.BasisMultiElectron(e_list, [0, 0]))

    for v_isymbol in v_list:
        basis.append(ba.BasisSHO(v_isymbol, locals()[f"w{v_isymbol}"], 30, dvr=dvr))

    logger.info(f"basis:{basis}")

    return basis, ham_terms

def photoelectron_operator(idx):
    # norbs is the number of spin orbitals
    # Green's function
    op_list = [Op("sigma_z", iorb, qn=0) for iorb in range(idx)]
    return Op.product(op_list + [Op("sigma_+", idx, qn=-1)])

def x_average(model: Model):
    """
    <x> of vibrational DoF
    """
    return {"x": [Mpo(model, Op("x", v_dof)) for v_dof in model.v_dofs]}

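# A short usage sketch (not part of the original source): evaluate <x> for a
# toy model with two vibrational DoFs. The frequencies and basis sizes are
# arbitrary, the `False` flag mirrors its use in `test_zt` above, and the
# top-level import path is assumed.
from renormalizer import BasisSHO, Model, Mps

basis = [BasisSHO("v0", 0.01, 4), BasisSHO("v1", 0.02, 4)]
model = Model(basis, [])
mps = Mps.ground_state(model, False)
x_vals = [mps.expectation(mpo) for mpo in x_average(model)["x"]]
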
def test_peierls_kubo():
    # number of mol
    n = 4
    # electronic coupling
    V = -Quantity(120, "meV").as_au()
    # intermolecular vibration freq
    omega = Quantity(50, "cm-1").as_au()
    # intermolecular coupling constant
    g = 4
    # number of quanta
    nlevels = 2
    # temperature
    temperature = Quantity(300, "K")

    # the Peierls model
    ham_terms = []
    for i in range(n):
        i1, i2 = i, (i + 1) % n
        # H_e
        hop1 = Op(r"a^\dagger a", [i1, i2], V)
        hop2 = Op(r"a a^\dagger", [i1, i2], V)
        ham_terms.extend([hop1, hop2])
        # H_ph
        ham_terms.append(Op(r"b^\dagger b", (i, 0), omega))
        # H_(e-ph)
        coup1 = Op(r"b^\dagger + b", (i, 0)) * Op(r"a^\dagger a", [i1, i2]) * g * omega
        coup2 = Op(r"b^\dagger + b", (i, 0)) * Op(r"a a^\dagger", [i1, i2]) * g * omega
        ham_terms.extend([coup1, coup2])

    basis = []
    for ni in range(n):
        basis.append(BasisSimpleElectron(ni))
        basis.append(BasisSHO((ni, 0), omega, nlevels))

    model = Model(basis, ham_terms)

    compress_config = CompressConfig(CompressCriteria.fixed, max_bonddim=24)
    ievolve_config = EvolveConfig(EvolveMethod.tdvp_vmf, ivp_atol=1e-3, ivp_rtol=1e-5)
    evolve_config = EvolveConfig(EvolveMethod.tdvp_vmf, ivp_atol=1e-3, ivp_rtol=1e-5)
    kubo = TransportKubo(model, temperature, compress_config=compress_config,
                         ievolve_config=ievolve_config, evolve_config=evolve_config)
    kubo.evolve(nsteps=5, evolve_time=1000)

    qutip_corr, qutip_corr_decomp = get_qutip_peierls_kubo(
        V, n, nlevels, omega, g, temperature, kubo.evolve_times_array
    )
    atol = 1e-7
    rtol = 5e-2
    # direct comparison may fail because of different sign
    assert np.allclose(kubo.auto_corr, qutip_corr, atol=atol, rtol=rtol)
    assert np.allclose(kubo.auto_corr_decomposition, qutip_corr_decomp, atol=atol, rtol=rtol)