def check_rortho(self):
    """ check R-orthogonal """
    tensm = self.array.reshape([self.shape[0], np.prod(self.shape[1:])])
    s = xp.dot(tensm, tensm.T.conj())
    return allclose(s, xp.eye(s.shape[0]), atol=1e-3)
def check_lortho(self):
    """ check L-orthogonal """
    tensm = self.array.reshape([np.prod(self.shape[:-1]), self.shape[-1]])
    s = xp.dot(tensm.T.conj(), tensm)
    return allclose(s, xp.eye(s.shape[0]), atol=1e-3)
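# Illustration of the checks above (a minimal numpy-only sketch; the QR-based
# construction is hypothetical and not part of this module): a tensor whose
# matricization is an isometry passes check_lortho.
#
#     q, _ = np.linalg.qr(np.random.rand(12, 4))  # 12 x 4 isometry, q^H q = I
#     tens = q.reshape(3, 4, 4)                    # view as a rank-3 tensor
#     mat = tens.reshape(12, 4)                    # merge all but the last axis
#     assert np.allclose(mat.T.conj() @ mat, np.eye(4))  # the check_lortho test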
def _call_impl(self, t):
    # normalized time within the step
    x = (t - self.t_old) / self.h
    if t.ndim == 0:
        # powers x, x**2, ..., x**(order + 1)
        p = xp.tile(x, self.order + 1)
        p = xp.cumprod(p)
    else:
        # same powers, one column per evaluation point
        p = xp.tile(x, (self.order + 1, 1))
        p = xp.cumprod(p, axis=0)
    # the interpolating polynomial has no constant term,
    # so y_old is added separately
    y = self.h * xp.dot(self.Q, p)
    if y.ndim == 2:
        y += self.y_old[:, None]
    else:
        y += self.y_old
    return y
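# Worked example of the interpolation above (a sketch with made-up numbers;
# the shapes of `Q`, `order` and `h` are assumed to follow this class): with
# x = (t - t_old) / h, cumprod builds p = [x, x**2, ..., x**(order + 1)] and
# the dense output is y(t) = y_old + h * Q @ p.
#
#     Q = np.array([[1.0, 0.5, 0.2]])   # one state variable, order = 2
#     x = 0.5
#     p = np.cumprod(np.tile(x, 3))     # [0.5, 0.25, 0.125]
#     y = 2.0 + 0.1 * Q.dot(p)          # y_old = 2.0, h = 0.1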
def rk_step(fun, t, y, f, h, A, B, C, E, K):
    """Perform a single Runge-Kutta step.

    This function computes a prediction of an explicit Runge-Kutta method and
    also estimates the error of a less accurate method.

    Notation for Butcher tableau is as in [1]_.

    Parameters
    ----------
    fun : callable
        Right-hand side of the system.
    t : float
        Current time.
    y : ndarray, shape (n,)
        Current state.
    f : ndarray, shape (n,)
        Current value of the derivative, i.e. ``fun(t, y)``.
    h : float
        Step to use.
    A : list of ndarray, length n_stages - 1
        Coefficients for combining previous RK stages to compute the next
        stage. For explicit methods the coefficients above the main diagonal
        are zeros, so `A` is stored as a list of arrays of increasing lengths.
        The first stage is always just `f`, thus no coefficients for it
        are required.
    B : ndarray, shape (n_stages,)
        Coefficients for combining RK stages for computing the final
        prediction.
    C : ndarray, shape (n_stages - 1,)
        Coefficients for incrementing time for consecutive RK stages. The
        value for the first stage is always zero, thus it is not stored.
    E : ndarray, shape (n_stages + 1,)
        Coefficients for estimating the error of a less accurate method. They
        are computed as the difference between b's in an extended tableau.
    K : ndarray, shape (n_stages + 1, n)
        Storage array for putting RK stages here. Stages are stored in rows.

    Returns
    -------
    y_new : ndarray, shape (n,)
        Solution at t + h computed with a higher accuracy.
    f_new : ndarray, shape (n,)
        Derivative ``fun(t + h, y_new)``.
    error : ndarray, shape (n,)
        Error estimate of a less accurate method.

    References
    ----------
    .. [1] E. Hairer, S. P. Norsett, G. Wanner, "Solving Ordinary Differential
           Equations I: Nonstiff Problems", Sec. II.4.
    """
    K[0] = f
    for s, (a, c) in enumerate(zip(A, C)):
        dy = xp.dot(K[:s + 1].T, a) * h
        K[s + 1] = fun(t + c * h, y + dy)

    y_new = y + h * xp.dot(K[:-1].T, B)
    f_new = fun(t + h, y_new)

    K[-1] = f_new
    error = xp.dot(K.T, E) * h

    return y_new, f_new, error
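# Usage sketch for rk_step (illustrative tableau, not taken from this module):
# Heun's method with an embedded Euler error estimate, so E = b_high - b_low
# padded with a zero for the extra f_new row of K.
#
#     fun = lambda t, y: -y                # dy/dt = -y
#     y0 = np.array([1.0])
#     A = [np.array([1.0])]                # single off-diagonal stage
#     B = np.array([0.5, 0.5])             # 2nd-order (Heun) weights
#     C = np.array([1.0])                  # stage time; first stage is 0
#     E = np.array([-0.5, 0.5, 0.0])       # error weights, shape (n_stages + 1,)
#     K = np.empty((3, 1))                 # (n_stages + 1, n) stage storage
#     y1, f1, err = rk_step(fun, 0.0, y0, fun(0.0, y0), 0.1, A, B, C, E, K)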
def optimize_cv(self, lr_group, isite, percent=0.0):
    # depending on the spectratype, restrict the exciton quantum number
    first_LR = lr_group[0]
    second_LR = lr_group[1]
    constrain_qn = self.cv_mps.qntot
    # this function aims at solving the work equation of ZT CV-DMRG
    # L = <CV|op_a|CV> + 2\eta<op_b|CV>; take the derivative with respect
    # to the local CV tensor:
    # S-a-S-e-S                          S-a-S-d-S
    # |   d   |                          |   |   |
    # O-b-O-g-O * CV[isite-1]  = -\eta   |   c   |
    # |   f   |                          |   |   |
    # S-c- -h-S                          S-b- -e-S
    # note that this takes the form a_mat * x = vec_b

    # the environment matrix
    if self.method == "1site":
        cidx = [isite - 1]
        first_L = asxp(first_LR[isite - 1])
        first_R = asxp(first_LR[isite])
        second_L = asxp(second_LR[isite - 1])
        second_R = asxp(second_LR[isite])
    else:
        cidx = [isite - 2, isite - 1]
        first_L = asxp(first_LR[isite - 2])
        first_R = asxp(first_LR[isite])
        second_L = asxp(second_LR[isite - 2])
        second_R = asxp(second_LR[isite])

    # this part is similar to the ground state calculation
    qnbigl, qnbigr, qnmat = self.cv_mps._get_big_qn(cidx)
    xshape = qnmat.shape
    nonzeros = int(np.sum(qnmat == constrain_qn))

    if self.method == '1site':
        guess = self.cv_mps[isite - 1][qnmat == constrain_qn]
        path_b = [([0, 1], "ab, acd->bcd"),
                  ([1, 0], "bcd, de->bce")]
        vec_b = multi_tensor_contract(
            path_b, second_L, self.b_mps[isite - 1], second_R
        )[qnmat == constrain_qn]
    else:
        guess = tensordot(
            self.cv_mps[isite - 2], self.cv_mps[isite - 1], axes=(-1, 0)
        )[qnmat == constrain_qn]
        path_b = [([0, 1], "ab, acd->bcd"),
                  ([2, 0], "bcd, def->bcef"),
                  ([1, 0], "bcef, fg->bceg")]
        vec_b = multi_tensor_contract(
            path_b, second_L, self.b_mps[isite - 2],
            self.b_mps[isite - 1], second_R
        )[qnmat == constrain_qn]

    if self.method == "2site":
        a_oper_isite2 = asxp(self.a_oper[isite - 2])
    else:
        a_oper_isite2 = None
    a_oper_isite1 = asxp(self.a_oper[isite - 1])

    # use the diagonal part of mat_a to construct the preconditioner
    # for the linear solver
    part_l = xp.einsum('abca->abc', first_L)
    part_r = xp.einsum('hfgh->hfg', first_R)
    if self.method == "1site":
        # S-a  d   h-S
        # O-b -O-  f-O
        # |    e     |
        # O-c -O-  g-O
        # S-a  i   h-S
        path_pre = [([0, 1], "abc, bdef -> acdef"),
                    ([1, 0], "acdef, ceig -> adfig")]
        a_diag = multi_tensor_contract(path_pre, part_l, a_oper_isite1,
                                       a_oper_isite1)
        a_diag = xp.einsum("adfdg -> adfg", a_diag)
        a_diag = xp.tensordot(a_diag, part_r,
                              axes=([2, 3], [1, 2]))[qnmat == constrain_qn]
    else:
        # S-a  d     k    h-S
        # O-b -O- j -O-   f-O
        # |    e     l      |
        # O-c -O- m -O-   g-O
        # S-a  i     n    h-S
        # first the left half, then the right half, then the contraction
        path_pre = [([0, 1], "abc, bdej -> acdej"),
                    ([1, 0], "acdej, ceim -> adjim")]
        a_diagl = multi_tensor_contract(path_pre, part_l, a_oper_isite2,
                                        a_oper_isite2)
        a_diagl = xp.einsum("adjdm -> adjm", a_diagl)

        path_pre = [([0, 1], "hfg, jklf -> hgjkl"),
                    ([1, 0], "hgjkl, mlng -> hjkmn")]
        a_diagr = multi_tensor_contract(path_pre, part_r, a_oper_isite1,
                                        a_oper_isite1)
        a_diagr = xp.einsum("hjkmk -> khjm", a_diagr)

        a_diag = xp.tensordot(
            a_diagl, a_diagr, axes=([2, 3], [2, 3]))[qnmat == constrain_qn]

    a_diag = asnumpy(a_diag + xp.ones(nonzeros) * self.eta**2)
    M_x = lambda x: x / a_diag
    pre_M = scipy.sparse.linalg.LinearOperator((nonzeros, nonzeros), M_x)

    count = 0

    # cache the opt_einsum contraction path
    if self.method == "2site":
        expr = oe.contract_expression(
            "abcd, befh, cfgi, hjkn, iklo, mnop, dglp -> aejm",
            first_L, a_oper_isite2, a_oper_isite2, a_oper_isite1,
            a_oper_isite1, first_R, xshape,
            constants=[0, 1, 2, 3, 4, 5])

    def hop(c):
        nonlocal count
        count += 1
        xstruct = asxp(cvec2cmat(xshape, c, qnmat, constrain_qn))
        if self.method == "1site":
            path_a = [([0, 1], "abcd, aef->bcdef"),
                      ([3, 0], "bcdef, begh->cdfgh"),
                      ([2, 0], "cdfgh, cgij->dfhij"),
                      ([1, 0], "dfhij, fhjk->dik")]
            ax1 = multi_tensor_contract(path_a, first_L, xstruct,
                                        a_oper_isite1, a_oper_isite1,
                                        first_R)
        else:
            # opt_einsum v3.2.1 is not bad, ~10% faster than the hand-designed
            # contraction path for this complicated case and consumes a little
            # bit less memory.
            # this is the only place in renormalizer we use opt_einsum now.
            # we keep it here just for a demo.
            # ax1 = oe.contract("abcd, befh, cfgi, hjkn, iklo, mnop, dglp -> aejm",
            #                   first_L, a_oper_isite2, a_oper_isite2, a_oper_isite1,
            #                   a_oper_isite1, first_R, xstruct)
            if USE_GPU:
                oe_backend = "cupy"
            else:
                oe_backend = "numpy"
            ax1 = expr(xstruct, backend=oe_backend)
            # print(oe.contract_path("abcd, befh, cfgi, hjkn, iklo, mnop, dglp -> aejm",
            #                        first_L, a_oper_isite2, a_oper_isite2, a_oper_isite1,
            #                        a_oper_isite1, first_R, xstruct))
            # the hand-designed contraction path, for reference:
            # path_a = [([0, 1], "abcd, aefg->bcdefg"),
            #           ([5, 0], "bcdefg, behi->cdfghi"),
            #           ([4, 0], "cdfghi, ifjk->cdghjk"),
            #           ([3, 0], "cdghjk, chlm->dgjklm"),
            #           ([2, 0], "dgjklm, mjno->dgklno"),
            #           ([1, 0], "dgklno, gkop->dlnp")]
            # ax1 = multi_tensor_contract(path_a, first_L, xstruct,
            #                             a_oper_isite2, a_oper_isite1,
            #                             a_oper_isite2, a_oper_isite1,
            #                             first_R)
        ax = ax1 + xstruct * self.eta**2
        cout = ax[qnmat == constrain_qn]
        return asnumpy(cout)

    mat_a = scipy.sparse.linalg.LinearOperator((nonzeros, nonzeros),
                                               matvec=hop)

    x, info = scipy.sparse.linalg.cg(mat_a, asnumpy(vec_b), tol=1.e-5,
                                     x0=asnumpy(guess), M=pre_M, atol=0)
    self.hop_time.append(count)
    if info != 0:
        logger.info("iteration solver not converged")

    # the value of the functional L
    l_value = xp.dot(asxp(hop(x)), asxp(x)) - 2 * xp.dot(vec_b, asxp(x))
    xstruct = cvec2cmat(xshape, x, qnmat, constrain_qn)
    self.cv_mps._update_mps(xstruct, cidx, qnbigl, qnbigr, self.m_max,
                            percent)

    return float(l_value)
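# The solver pattern above, stripped to its core (a self-contained sketch with
# toy data, not renormalizer API): CG on an implicitly defined symmetric
# positive definite operator, preconditioned by its inverse diagonal, which is
# how hop and pre_M cooperate in optimize_cv.
#
#     import numpy as np
#     import scipy.sparse.linalg
#
#     n = 100
#     diag = np.linspace(1.0, 10.0, n)
#     matvec = lambda v: diag * v + 0.01 * v.sum()   # implicit A, like hop
#     A = scipy.sparse.linalg.LinearOperator((n, n), matvec=matvec)
#     M = scipy.sparse.linalg.LinearOperator((n, n), lambda v: v / diag)
#     x, info = scipy.sparse.linalg.cg(A, np.ones(n), M=M, atol=0)
#     assert info == 0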
def optimize_cv(self, lr_group, isite, percent=0):
    if self.spectratype == "abs":
        # quantum number restriction, |1><0|
        up_exciton, down_exciton = 1, 0
    elif self.spectratype == "emi":
        # quantum number restriction, |0><1|
        up_exciton, down_exciton = 0, 1
    nexciton = 1
    first_LR, second_LR, third_LR, forth_LR = lr_group

    if self.method == "1site":
        add_list = [isite - 1]
        first_L = asxp(first_LR[isite - 1])
        first_R = asxp(first_LR[isite])
        second_L = asxp(second_LR[isite - 1])
        second_R = asxp(second_LR[isite])
        third_L = asxp(third_LR[isite - 1])
        third_R = asxp(third_LR[isite])
        forth_L = asxp(forth_LR[isite - 1])
        forth_R = asxp(forth_LR[isite])
    else:
        add_list = [isite - 2, isite - 1]
        first_L = asxp(first_LR[isite - 2])
        first_R = asxp(first_LR[isite])
        second_L = asxp(second_LR[isite - 2])
        second_R = asxp(second_LR[isite])
        third_L = asxp(third_LR[isite - 2])
        third_R = asxp(third_LR[isite])
        forth_L = asxp(forth_LR[isite - 2])
        forth_R = asxp(forth_LR[isite])

    xqnmat, xqnbigl, xqnbigr, xshape = self.construct_X_qnmat(add_list)
    dag_qnmat, dag_qnbigl, dag_qnbigr = self.swap(xqnmat, xqnbigl, xqnbigr)
    nonzeros = int(
        np.sum(self.condition(dag_qnmat, [down_exciton, up_exciton])))

    if self.method == "1site":
        guess = moveaxis(self.cv_mpo[isite - 1], (1, 2), (2, 1))
    else:
        guess = tensordot(
            moveaxis(self.cv_mpo[isite - 2], (1, 2), (2, 1)),
            moveaxis(self.cv_mpo[isite - 1], (1, 2), (2, 1)),
            axes=(-1, 0))
    guess = guess[self.condition(dag_qnmat, [down_exciton, up_exciton])]

    if self.method == "1site":
        # define the contraction paths
        path_1 = [([0, 1], "abcd, aefg -> bcdefg"),
                  ([3, 0], "bcdefg, bfhi -> cdeghi"),
                  ([2, 0], "cdeghi, chjk -> degijk"),
                  ([1, 0], "degijk, gikl -> dejl")]
        path_2 = [([0, 1], "abcd, aefg -> bcdefg"),
                  ([3, 0], "bcdefg, bfhi -> cdeghi"),
                  ([2, 0], "cdeghi, djek -> cghijk"),
                  ([1, 0], "cghijk, gilk -> chjl")]
        path_3 = [([0, 1], "ab, acde -> bcde"),
                  ([1, 0], "bcde, ef -> bcdf")]

        vecb = multi_tensor_contract(
            path_3, forth_L,
            moveaxis(self.b_mpo[isite - 1], (1, 2), (2, 1)),
            forth_R
        )[self.condition(dag_qnmat, [down_exciton, up_exciton])]

    a_oper_isite = asxp(self.a_oper[isite - 1])
    h_mpo_isite = asxp(self.h_mpo[isite - 1])

    # construct the preconditioner
    Idt = xp.identity(h_mpo_isite.shape[1])
    M1_1 = xp.einsum('abca->abc', first_L)
    path_m1 = [([0, 1], "abc, bdef->acdef"),
               ([1, 0], "acdef, cegh->adfgh")]
    M1_2 = multi_tensor_contract(path_m1, M1_1, a_oper_isite, a_oper_isite)
    M1_2 = xp.einsum("abcbd->abcd", M1_2)
    M1_3 = xp.einsum('ecde->ecd', first_R)
    M1_4 = xp.einsum('ff->f', Idt)
    path_m1 = [([0, 1], "abcd,ecd->abe"),
               ([1, 0], "abe,f->abef")]
    pre_M1 = multi_tensor_contract(path_m1, M1_2, M1_3, M1_4)
    pre_M1 = xp.moveaxis(pre_M1, [-2, -1], [-1, -2])[self.condition(
        dag_qnmat, [down_exciton, up_exciton])]

    M2_1 = xp.einsum('aeag->aeg', second_L)
    M2_2 = xp.einsum('eccf->ecf', a_oper_isite)
    M2_3 = xp.einsum('gbbh->gbh', h_mpo_isite)
    M2_4 = xp.einsum('dfdh->dfh', second_R)
    path_m2 = [([0, 1], "aeg,gbh->aebh"),
               ([2, 0], "aebh,ecf->abchf"),
               ([1, 0], "abhcf,dfh->abcd")]
    pre_M2 = multi_tensor_contract(path_m2, M2_1, M2_3, M2_2, M2_4)
    pre_M2 = pre_M2[self.condition(dag_qnmat, [down_exciton, up_exciton])]

    M4_1 = xp.einsum('faah->fah', third_L)
    M4_4 = xp.einsum('gddi->gdi', third_R)
    M4_5 = xp.einsum('cc->c', Idt)
    M4_path = [([0, 1], "fah,febg->ahebg"),
               ([2, 0], "ahebg,hjei->abgji"),
               ([1, 0], "abgji,gdi->abjd")]
    pre_M4 = multi_tensor_contract(M4_path, M4_1, h_mpo_isite, h_mpo_isite,
                                   M4_4)
    pre_M4 = xp.einsum('abbd->abd', pre_M4)
    pre_M4 = xp.tensordot(pre_M4, M4_5, axes=0)
    pre_M4 = xp.moveaxis(pre_M4, [2, 3], [3, 2])[self.condition(
        dag_qnmat, [down_exciton, up_exciton])]

    M_x = lambda x: asnumpy(
        asxp(x) / (pre_M1 + 2 * pre_M2 + pre_M4
                   + xp.ones(nonzeros) * self.eta**2))
    pre_M = scipy.sparse.linalg.LinearOperator((nonzeros, nonzeros), M_x)

    count = 0

    def hop(x):
        nonlocal count
        count += 1
        dag_struct = asxp(self.dag2mat(xshape, x, dag_qnmat))
        if self.method == "1site":
            M1 = multi_tensor_contract(path_1, first_L, dag_struct,
                                       a_oper_isite, a_oper_isite, first_R)
            M2 = multi_tensor_contract(path_2, second_L, dag_struct,
                                       a_oper_isite, h_mpo_isite, second_R)
            M2 = xp.moveaxis(M2, (1, 2), (2, 1))
            M3 = multi_tensor_contract(path_2, third_L, h_mpo_isite,
                                       dag_struct, h_mpo_isite, third_R)
            M3 = xp.moveaxis(M3, (1, 2), (2, 1))
            cout = M1 + 2 * M2 + M3 + dag_struct * self.eta**2
        cout = cout[self.condition(dag_qnmat, [down_exciton, up_exciton])]
        return asnumpy(cout)

    # matrix A of the linear problem
    mat_a = scipy.sparse.linalg.LinearOperator((nonzeros, nonzeros),
                                               matvec=hop)

    x, info = scipy.sparse.linalg.cg(mat_a, asnumpy(vecb), tol=1.e-5,
                                     x0=asnumpy(guess), maxiter=500,
                                     M=pre_M, atol=0)
    # logger.info(f"linear eq dim: {nonzeros}")
    # logger.info(f"times for hop: {count}")
    self.hop_time.append(count)
    if info != 0:
        logger.warning(
            f"cg not converged, vecb.norm: {xp.linalg.norm(vecb)}")
    l_value = xp.dot(asxp(hop(x)), asxp(x)) - 2 * xp.dot(vecb, asxp(x))

    x = self.dag2mat(xshape, x, dag_qnmat)
    if self.method == "1site":
        x = np.moveaxis(x, [1, 2], [2, 1])
    x, xdim, xqn, compx = self.x_svd(x, xqnbigl, xqnbigr, nexciton,
                                     percent=percent)

    if self.method == "1site":
        self.cv_mpo[isite - 1] = x
        if not self.cv_mpo.to_right:
            if isite != 1:
                self.cv_mpo[isite - 2] = tensordot(
                    self.cv_mpo[isite - 2], compx, axes=(-1, 0))
                self.cv_mpo.qn[isite - 1] = xqn
                self.cv_mpo.qnidx = isite - 2
            else:
                self.cv_mpo[isite - 1] = tensordot(
                    compx, self.cv_mpo[isite - 1], axes=(-1, 0))
                self.cv_mpo.qnidx = 0
        else:
            if isite != len(self.cv_mpo):
                self.cv_mpo[isite] = tensordot(
                    compx, self.cv_mpo[isite], axes=(-1, 0))
                self.cv_mpo.qn[isite] = xqn
                self.cv_mpo.qnidx = isite
            else:
                self.cv_mpo[isite - 1] = tensordot(
                    self.cv_mpo[isite - 1], compx, axes=(-1, 0))
                self.cv_mpo.qnidx = self.cv_mpo.site_num - 1
    else:
        if not self.cv_mpo.to_right:
            self.cv_mpo[isite - 2] = compx
            self.cv_mpo[isite - 1] = x
            self.cv_mpo.qnidx = isite - 2
        else:
            self.cv_mpo[isite - 2] = x
            self.cv_mpo[isite - 1] = compx
            self.cv_mpo.qnidx = isite - 1
        self.cv_mpo.qn[isite - 1] = xqn

    return float(l_value)
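# The quantum-number masking idiom shared by both optimize_cv variants, in
# isolation (a minimal numpy sketch; qnmat / qntot stand in for the real
# bookkeeping done by cvec2cmat / dag2mat):
#
#     qnmat = np.array([[0, 1], [1, 2]])   # quantum number of each entry
#     qntot = 1                            # conserved total quantum number
#     tensor = np.arange(4.0).reshape(2, 2)
#     vec = tensor[qnmat == qntot]         # pack allowed entries into the CG vector
#     back = np.zeros_like(tensor)
#     back[qnmat == qntot] = vec           # unpack the solution afterwards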