def apply(self, mp: MpDmFull, canonicalise=False):
    """Apply the Lindblad-equation Liouvillian to the density matrix ``mp``.

    Returns the coherent part ``H @ mp - mp @ H`` and, when
    ``self.dissipation`` is nonzero, adds ``1j * dissipation`` times the
    dissipator assembled from the per-mode lowering operators.

    Parameters
    ----------
    mp : MpDmFull
        The density matrix in MPDM form. Must satisfy ``mp.is_mpdm``.
    canonicalise : bool
        Not supported; must be ``False`` (asserted below).
    """
    assert mp.is_mpdm
    assert not canonicalise
    # Coherent evolution: the commutator [H, rho] via MPO-MPDM contraction.
    no_dissipation = self.h_mpo.contract(mp) - mp.contract(self.h_mpo)
    if self.dissipation == 0:
        return no_dissipation
    # create and destroy operators
    pm_operators: List[Tuple[Mpo, Mpo]] = mp.mol_list.get_mpos(
        "lindblad_pm", calc_lindblad_pm)
    applied_terms = []
    for b, b_dag in pm_operators:
        # b rho b^dagger for each mode
        res = b.apply(mp).apply(b_dag)
        for mt in res:
            if mt.nearly_zero():
                # discard vacuum states
                break
        else:
            # for/else: kept only when no matrix in `res` is nearly zero
            applied_terms.append(res)
    if len(applied_terms) == 0:
        # every jump term annihilated the state; only consistent with an
        # entirely unoccupied phonon space
        assert mp.ph_occupations.sum() == 0
        return no_dissipation
    summed_term = compressed_sum(applied_terms)
    bdb_operator: Mpo = mp.mol_list.get_mpos("lindblad_bdb", calc_lindblad_bdb)
    # any room for optimization? are there any simple relations between the two terms?
    try:
        # anticommutator part of the dissipator: -1/2 {b^dagger b, rho}
        lindblad = summed_term - 0.5 * (bdb_operator.contract(mp, True)
                                        + mp.contract(bdb_operator, True))
    except EmptyMatrixError:
        lindblad = summed_term
    ret = no_dissipation + 1j * self.dissipation * lindblad
    return ret
def calc_lindblad_bdb(mol_list):
    r"""Build the total phonon-number operator :math:`\sum b^\dagger b`
    over every DMRG-treated phonon mode in ``mol_list``, returned as a
    single compressed MPO.

    Parameters
    ----------
    mol_list
        Iterable of molecules; each molecule exposes ``n_dmrg_phs``.

    Returns
    -------
    Mpo
        The compressed sum of all on-site ``b^\dagger b`` operators.
    """
    ph_operators = []
    for imol, m in enumerate(mol_list):
        for jph in range(m.n_dmrg_phs):
            bdb = Mpo.ph_onsite(mol_list, r"b^\dagger b", imol, jph)
            # keep each single-mode operator compact before summation
            bdb.set_threshold(1e-5)
            ph_operators.append(bdb)
    # compressed_sum keeps the bond dimension of the summed MPO under control
    return compressed_sum(ph_operators)
def _construct_flux_operator(self):
    """Assemble the flux operator from nearest-neighbour displacement
    terms and their (sign-flipped) conjugate transposes, then return the
    compressed sum."""
    logger.debug("constructing flux operator")
    terms = []
    for site in range(len(self.mol_list) - 1):
        coupling = self.mol_list.j_matrix[site, site + 1]
        forward = Mpo.displacement(self.mol_list, site, site + 1).scale(coupling)
        forward.compress_config.threshold = 1e-5
        # the reverse-direction term is minus the Hermitian conjugate
        backward = forward.conj_trans().scale(-1)
        terms.append(forward)
        terms.append(backward)
    j_oper = compressed_sum(terms, batchsize=10)
    logger.debug(f"operator bond dim: {j_oper.bond_dims}")
    return j_oper
def _construct_flux_operator(self):
    """Construct the 1-d Holstein-model flux operator.

    Dispatches on the type of ``self.mol_list``: the ``MolList`` branch
    builds explicit intersite MPOs pair by pair (optionally wrapping
    around under periodic boundary conditions), while the ``MolList2``
    branch declares the terms symbolically and lets ``Mpo.general_mpo``
    assemble the operator.
    """
    # construct flux operator
    logger.info("constructing 1-d Holstein model flux operator ")
    if isinstance(self.mol_list, MolList):
        if self.mol_list.periodic:
            # periodic: include the bond closing the ring
            itera = range(len(self.mol_list))
        else:
            itera = range(len(self.mol_list) - 1)
        j_list = []
        for i in itera:
            conne = (i + 1) % len(self.mol_list)  # connect site index
            j1 = Mpo.intersite(self.mol_list, {
                i: r"a",
                conne: r"a^\dagger"
            }, {}, Quantity(self.mol_list.j_matrix[i, conne]))
            j1.compress_config.threshold = 1e-8
            # reverse hop enters with opposite sign (flux is anti-Hermitian)
            j2 = j1.conj_trans().scale(-1)
            j_list.extend([j1, j2])
        j_oper = compressed_sum(j_list, batchsize=10)
    elif isinstance(self.mol_list, MolList2):
        e_nsite = self.mol_list.n_edofs
        model = {}
        for i in range(e_nsite):
            conne = (i + 1) % e_nsite  # connect site index
            # NOTE(review): the two terms use j_matrix[i, conne] and
            # j_matrix[conne, i] respectively — presumably j_matrix is
            # symmetric/Hermitian so these coincide; confirm for complex
            # couplings.
            model[(f"e_{i}", f"e_{conne}")] = [
                (Op(r"a^\dagger", 1), Op("a", -1),
                 -self.mol_list.j_matrix[i, conne]),
                (Op(r"a", -1), Op(r"a^\dagger", 1),
                 self.mol_list.j_matrix[conne, i])
            ]
        j_oper = Mpo.general_mpo(
            self.mol_list,
            model=model,
            model_translator=ModelTranslator.general_model)
    else:
        assert False
    logger.debug(f"flux operator bond dim: {j_oper.bond_dims}")
    return j_oper
def _evolve_dmrg_prop_and_compress(self, mpo, evolve_dt, config: EvolveConfig = None) -> "Mps":
    """Evolve the MPS by the propagate-and-compress (Runge-Kutta) scheme.

    Builds the RK series ``sum_k c_k (-i dt H)^k |psi>`` term by term via
    repeated MPO contraction, then either sums it directly (fixed step) or,
    in adaptive mode, monitors the angle between the series with and
    without the highest-order term (and the energy drift) to accept,
    shrink, or enlarge ``config.evolve_dt``.

    Parameters
    ----------
    mpo
        The Hamiltonian MPO used for propagation.
    evolve_dt
        Total time step requested by the caller; must not be ``None``.
    config : EvolveConfig, optional
        Evolution settings; defaults to ``self.evolve_config``.

    Returns
    -------
    Mps
        The evolved (and compressed) state.
    """
    if config is None:
        config = self.evolve_config
    assert evolve_dt is not None
    propagation_c = config.rk_config.coeff
    termlist = [self]
    # don't let bond dim grow when contracting
    orig_compress_config = self.compress_config
    contract_compress_config = self.compress_config.copy()
    if contract_compress_config.criteria is CompressCriteria.threshold:
        contract_compress_config.criteria = CompressCriteria.both
    contract_compress_config.min_dims = None
    contract_compress_config.max_dims = np.array(self.bond_dims) + 4
    self.compress_config = contract_compress_config
    while len(termlist) < len(propagation_c):
        # k-th term is H applied k times to the state
        termlist.append(mpo.contract(termlist[-1]))
    # bond dim can grow after adding
    for t in termlist:
        t.compress_config = orig_compress_config
    if config.adaptive:
        config.check_valid_dt(evolve_dt)
        while True:
            scaled_termlist = []
            for idx, term in enumerate(termlist):
                # RK weight for the k-th power of (-i dt H)
                scale = (-1.0j * config.evolve_dt) ** idx * propagation_c[idx]
                scaled_termlist.append(term.scale(scale))
            del term
            # series without / with the highest-order term; their overlap
            # angle estimates the local truncation error
            new_mps1 = compressed_sum(scaled_termlist[:-1])._dmrg_normalize()
            new_mps2 = compressed_sum([new_mps1, scaled_termlist[-1]])._dmrg_normalize()
            angle = new_mps1.angle(new_mps2)
            energy1 = self.expectation(mpo)
            energy2 = new_mps1.expectation(mpo)
            rtol = config.adaptive_rtol  # default to 1e-3
            # step-size controller: p > 1 means the step was more accurate
            # than required (1e-30 guards against division by zero)
            p = (rtol / (np.sqrt(2 * abs(1 - angle)) + 1e-30)) ** 0.2 * 0.8
            logger.debug(f"angle: {angle}. e1: {energy1}. e2: {energy2}, p: {p}")
            d_energy = config.d_energy
            if abs(energy1 - energy2) < d_energy and 0.5 < p:
                # converged
                if abs(config.evolve_dt - evolve_dt) / abs(evolve_dt) < 1e-5:
                    # equal evolve_dt
                    if abs(energy1 - energy2) < (d_energy/10) and 1.1 < p:
                        # a larger dt could be used
                        config.evolve_dt *= min(p, 1.5)
                        logger.debug(
                            f"evolution easily converged, new evolve_dt: {config.evolve_dt}"
                        )
                    # First exit
                    new_mps2.evolve_config.evolve_dt = config.evolve_dt
                    return new_mps2
                if abs(config.evolve_dt) < abs(evolve_dt):
                    # step smaller than required
                    new_dt = evolve_dt - config.evolve_dt
                    logger.debug(f"remaining: {new_dt}")
                    # Second exit
                    new_mps2.evolve_config.evolve_dt = config.evolve_dt
                    # memory consuming and not useful anymore
                    del new_mps1, termlist, scaled_termlist
                    # recurse to evolve the remaining time
                    return new_mps2._evolve_dmrg_prop_and_compress(mpo, new_dt, config)
                else:
                    # shouldn't happen.
                    raise ValueError(
                        f"evolve_dt in config: {config.evolve_dt}, in arg: {evolve_dt}"
                    )
            else:
                # not converged
                config.evolve_dt /= 2
                logger.debug(
                    f"evolution not converged, new evolve_dt: {config.evolve_dt}"
                )
    else:
        # fixed-step mode: scale each term in place and sum once
        for idx, term in enumerate(termlist):
            term.scale(
                (-1.0j * evolve_dt) ** idx * propagation_c[idx], inplace=True
            )
        return compressed_sum(termlist)
def analysis_dominant_config(self, thresh=0.8, alias=None, tda_m_trunc=20,
        return_compressed_mps=False):
    r""" analyze the dominant configuration of each tda root.
        The algorithm is to compress the tda wavefunction to a rank-1 Hartree
        state and get the ci coefficient of the largest configuration. Then,
        the configuration is subtracted from the tda wavefunction and redo the
        first step to get the second largest configuration. The two steps continue
        until the thresh is achieved.

    Parameters
    ----------
    thresh: float, optional
        the threshold to stop the analysis procedure of each root.
        :math:`\sum_i |c_i|^2 > thresh`. Default is 0.8.
    alias: dict, optional
        The alias of each site. For example, ``alias={0:"v_0", 1:"v_2",
        2:"v_1"}``. Default is `None`.
    tda_m_trunc: int, optional
        the ``m`` to compress a tda wavefunction. Default is 20.
    return_compressed_mps: bool, optional
        If ``True``, return the tda excited state as a single compressed
        mps. Default is `False`.

    Returns
    -------
    configs: dict
        The dominant configration of each root.
        ``configs = {0:[(config0, config_name0, ci_coeff0),(config1,
        config_name1, ci_coeff1),...], 1:...}``
    compressed_mps: List[renormalizer.mps.Mps]
        see the description in ``return_compressed_mps``.

    Note
    ----
    The compressed_mps is an approximation of the tda wavefunction with
    ``m=tda_m_trunc``.
    """
    mps_l_cano, mps_r_cano, tangent_u, tda_coeff_list = self.wfn
    if alias is not None:
        assert len(alias) == mps_l_cano.site_num
    compressed_mps = []
    for iroot in range(self.nroots):
        logger.info(f"iroot: {iroot}")
        tda_coeff = tda_coeff_list[iroot]
        mps_tangent_list = []
        weight = []
        # rebuild the tangent-space MPS for every site with a nonzero
        # tangent basis; `weight` collects each site's squared coefficient
        for ims in range(mps_l_cano.site_num):
            if tangent_u[ims] is None:
                assert tda_coeff[ims] is None
                continue
            weight.append(np.sum(tda_coeff[ims]**2))
            mps_tangent = merge(mps_l_cano, mps_r_cano, ims+1)
            mps_tangent[ims] = asnumpy(tensordot(tangent_u[ims],
                tda_coeff[ims], [-1, 0]))
            mps_tangent_list.append(mps_tangent)
        # the root should be normalized
        assert np.allclose(np.sum(weight), 1)
        # sort the mps_tangent from large weight to small weight
        mps_tangent_list = [mps_tangent_list[i]
            for i in np.argsort(weight, axis=None)[::-1]]
        coeff_square_sum = 0
        mps_delete = None
        config_visited = []
        while coeff_square_sum < thresh:
            if mps_delete is None:
                # first compress it to M=tda_m_trunc
                mps_rank1 = compressed_sum(mps_tangent_list, batchsize=5,
                    temp_m_trunc=tda_m_trunc)
            else:
                # subtract the already-extracted configurations, then compress
                mps_rank1 = compressed_sum([mps_delete] + mps_tangent_list,
                    batchsize=5, temp_m_trunc=tda_m_trunc)
            if coeff_square_sum == 0 and return_compressed_mps:
                compressed_mps.append(mps_rank1.copy())
            # collapse to a rank-1 (Hartree) product state
            mps_rank1 = mps_rank1.canonicalise().compress(temp_m_trunc=1)
            # get config with the largest coeff
            config = []
            for ims, ms in enumerate(mps_rank1):
                ms = ms.array.flatten()**2
                quanta = int(np.argmax(ms))
                config.append(quanta)
            # check if the config has been visited
            if config in config_visited:
                break
            config_visited.append(config)
            # CI coefficient of this configuration: contract each tangent
            # MPS along the chosen local quanta and sum the contributions
            ci_coeff_list = []
            for mps_tangent in mps_tangent_list:
                sentinel = xp.ones((1, 1))
                for ims, ms in enumerate(mps_tangent):
                    sentinel = sentinel.dot(asxp(ms[:, config[ims], :]))
                ci_coeff_list.append(float(sentinel[0, 0]))
            ci_coeff = np.sum(ci_coeff_list)
            coeff_square_sum += ci_coeff**2
            if alias is not None:
                # human-readable name: only sites with nonzero quanta
                config_name = [f"{quanta}"+f"{alias[isite]}"
                    for isite, quanta in enumerate(config) if quanta != 0]
                config_name = " ".join(config_name)
                self.configs[iroot].append((config, config_name, ci_coeff))
                logger.info(f"config: {config}, {config_name}")
            else:
                self.configs[iroot].append((config, ci_coeff))
                logger.info(f"config: {config}")
            logger.info(f"ci_coeff: {ci_coeff}, weight:{ci_coeff**2}")
            # subtract this configuration (as a Hartree product state scaled
            # by -ci_coeff) before searching for the next dominant one
            condition = {dof: config[idof]
                for idof, dof in enumerate(self.model.dofs)}
            mps_delete_increment = Mps.hartree_product_state(self.model,
                condition).scale(-ci_coeff)
            if mps_delete is None:
                mps_delete = mps_delete_increment
            else:
                mps_delete = mps_delete + mps_delete_increment
        logger.info(f"coeff_square_sum: {coeff_square_sum}")
    return self.configs, compressed_mps