def test1():
    '''test the leftblock'''
    #the phi in first_block has four components, the same content as a single site
    left = first_leftblock(1)
    cuop = create_operator_of_site(left.fock_basis, OperFactory.create_spinup())
    #nuop = create_operator_of_site(left.fock_basis, OperFactory.number_spinup())
    leftext = extend_leftblock(left)
    print(left)
    print(cuop)
    #print(nuop)
    #print('check:', numpy.matmul(cuop.mat, cuop.mat.transpose()))
    cuopext = leftblock_extend_oper(leftext, cuop)
    #nuopext = leftblock_extend_oper(leftext, nuop)
    print(cuopext)
    #print('check 2:', nuopext)
    #print(numpy.matmul(cuopext.mat, cuopext.mat.transpose()))
    #
    print('updating the operators on the site')
    #
    cuop2 = create_operator_of_site(leftext.stbss, OperFactory.create_spinup())
    #nuop2 = create_operator_of_site(leftext.stbss, OperFactory.number_spinup())
    print(cuop2)
    cuop2ext = leftsite_extend_oper(leftext, cuop2)
    #nuop2ext = leftsite_extend_oper(leftext, nuop2)
    print(cuop2ext)
    #print('check 3:', nuop2ext)
    #print(numpy.matmul(cuop2ext.mat, cuop2ext.mat.transpose()))
    #
    phival = random_phival([4, 16], leftext)
    newleft = update_to_leftblock(leftext, phival)
    print(newleft)
    #
    newcuop = update_leftblockextend_oper(newleft, cuopext, phival)
    #newnuop = update_leftblockextend_oper(newleft, nuopext, phival)
    print(newcuop)
    #print('check 4:', newnuop)
    #print(numpy.matmul(newnuop.mat, newnuop.mat.transpose()))
    #
    newleftext = extend_leftblock(newleft)
    newcuopext = leftblock_extend_oper(newleftext, newcuop)
    phival = random_phival([8, 16], newleftext)
    newnewleft = update_to_leftblock(newleftext, phival)
    newnewcuop = update_leftblockextend_oper(newnewleft, newcuopext, phival)
    print(newnewcuop)
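#A minimal sketch (not part of the test above) of the property the phival passed to
#update_to_leftblock is assumed to have here: random_phival([m, n], ...) is taken to
#return an m x n matrix with orthonormal rows (a truncation isometry), so updated
#operators keep their algebra. The helper below is hypothetical and only illustrates
#that assumption with plain numpy.
def _random_row_isometry_sketch(nkeep, ndim):
    '''hypothetical helper: build an nkeep x ndim matrix with orthonormal rows'''
    import numpy
    #the reduced QR of a random ndim x nkeep matrix gives a column-orthonormal q;
    #its transpose therefore has orthonormal rows
    qmat, _ = numpy.linalg.qr(numpy.random.randn(ndim, nkeep))
    phival = qmat.transpose()
    #rows are orthonormal: phival @ phival^T is close to the identity
    assert numpy.allclose(numpy.matmul(phival, phival.transpose()), numpy.eye(nkeep))
    return phival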
def leftblock_to_next(
        conf: DMRGConfig,
        phi_idx: int,
        newbonds: List[int],
        extoper_storage: List[int],
        tmpoper_storage: List[int],
        measure_storage: List[Tuple[str, int]]
    ):
    '''Push the leftblock forward by one site.\n
    left is the block to be pushed forward.\n
    phi_idx is the idx of the new left; it is not necessarily the same as block_len.\n
    conf is the handle of the running program.\n
    newbonds lists the sites that form bonds with the newly added site
    (the new site is left.fock_basis.site[-1]+1).\n
    extoper_storage lists the operators to be stored on the |phi^phi_idx-1, s^phi_idx>
    basis; the stored extopers will later be used on the superblock.\n
    tmpoper_storage lists the operators to be stored temporarily on the |phi^phi_idx>
    basis; they are used for the Hamiltonian in the next recursion step and for the
    ext operators that must be kept in the next iteration.\n
    measure_storage lists the operators that DMRG will need later for measurements;
    they must be saved into leftstorage. The measurement operators on leftext[phi_idx-1]
    also need to be saved; since leftext[phi_idx-1] and leftblock[phi_idx] have the same
    block_len, a single parameter is enough here.\n
    '''
    leftstorage = conf.get_leftblock_storage(phi_idx-1)
    left = leftstorage.block
    hamleft = leftstorage.hamiltonian
    ### start the work that is done on leftext
    #extend left by one site
    leftext = extend_leftblock(left)
    #initialize leftext; leftext[phi_idx-1] stores the extension of leftblock[phi_idx-1]
    conf.leftext_reset(phi_idx-1, leftext)
    #extend the Hamiltonian by one site
    hamleft = extend_leftblock_hamiltonian(hamleft, leftext)
    #store the Hamiltonian on the extended basis
    conf.storage_leftext_ham(phi_idx-1, hamleft)
    #operators that need to be extended
    maintain_opers = {}
    #create the two operators on the new site and extend them
    newup = create_operator_of_site(leftext.stbss, OperFactory.create_spinup())
    newup = leftsite_extend_oper(leftext, newup)
    newdown = create_operator_of_site(leftext.stbss, OperFactory.create_spindown())
    newdown = leftsite_extend_oper(leftext, newdown)
    #if leftext.stbss.sites[0] in tmpoper_storage:
    maintain_opers[leftext.stbss.sites[0]] = (newup, newdown)
    #extend all operators kept in left_tmp
    #note that leftstorage.oper_storage_list is modified in the loop body,
    #so we must not iterate over it directly
    while len(leftstorage.oper_storage_list) > 0:
    #for stidx in leftstorage.oper_storage_list:
        stidx = leftstorage.oper_storage_list[0]
        #pop the two operators stored on left
        stup, stdown = leftstorage.pop_oper(stidx)
        #extend the two operators
        stup = leftblock_extend_oper(leftext, stup)
        stdown = leftblock_extend_oper(leftext, stdown)
        maintain_opers[stidx] = (stup, stdown)
    #save the operators that need to be kept in leftext
    for stidx in extoper_storage:
        conf.storage_leftext_oper(phi_idx-1, maintain_opers[stidx][0])
        conf.storage_leftext_oper(phi_idx-1, maintain_opers[stidx][1])
    #find the operators that form bonds and add the new hopping terms to the Hamiltonian
    for bstidx in newbonds:
        coef_t = conf.model.get_t_coef(bstidx, newup.siteidx)
        #spin-up part
        tar_up = maintain_opers[bstidx][0]
        hamleft.add_hopping_term(newup, tar_up, coef_t)
        #spin-down part
        tar_down = maintain_opers[bstidx][1]
        hamleft.add_hopping_term(newdown, tar_down, coef_t)
    #build the U term, extend it and add it to the Hamiltonian
    newiu = create_operator_of_site(leftext.stbss, OperFactory.create_u())
    newiu = leftsite_extend_oper(leftext, newiu)
    hamleft.add_u_term(newiu, conf.model.coef_u)
    #build the Mu term, extend it and add it to the Hamiltonian
    coef_mu = conf.model.get_coef_mu(phi_idx)
    if coef_mu != 0:
        newnu = create_operator_of_site(leftext.stbss, OperFactory.create_numup())
        newnu = leftsite_extend_oper(leftext, newnu)
        hamleft.add_mu_term(newnu, coef_mu)
        newnd = create_operator_of_site(leftext.stbss, OperFactory.create_numdown())
        newnd = leftsite_extend_oper(leftext, newnd)
        hamleft.add_mu_term(newnd, coef_mu)
    ### start the upgrade from leftext to the next left
    #call get_phival_from_hamleft to get the energy eigenvalues
    #first obtain the corresponding spin-sector restriction
    if phi_idx <= numpy.floor_divide(conf.model.size, 2):
        sector = None
    else:
        sector = []
        sp1min = conf.spin_sector[0] - (conf.model.size - phi_idx)
        if sp1min < 0:
            sp1min = 0
        sp1max = conf.spin_sector[0]
        sp2min = conf.spin_sector[1] - (conf.model.size - phi_idx)
        if sp2min < 0:
            sp2min = 0
        sp2max = conf.spin_sector[1]
        for se1 in range(sp1min, sp1max + 1):
            for se2 in range(sp2min, sp2max + 1):
                sector.append((se1, se2))
    phival = get_phival_from_hamleft(
        hamleft,
        leftext,
        conf.nrg_max_keep,
        restrict_sector=sector
    )
    #upgrade leftext to the new basis; phival itself is not really needed at this point,
    #but in DEBUG_MODE fermionic solves for the coefficients of the basis in the
    #fock_basis, and that is needed
    newleft = update_to_leftblock(leftext, phival)
    #update the Hamiltonian
    hamleft = update_leftblockextend_hamiltonian(newleft, hamleft, phival)
    #take the measurement operators of leftblock[phi_idx-1] out of tmp and extend them
    #to leftext[phi_idx-1]
    ext_measops = {}
    for meas in measure_storage:
        if meas[1] == phi_idx:  #if it is the newly added site, create the operator instead of reading it
            meaop = create_operator_of_site(
                leftext.stbss,
                OperFactory.create_by_name(meas[0])
            )
            meaop = leftsite_extend_oper(leftext, meaop)
        else:
            meaop = leftstorage.get_meas(meas[0], meas[1])
            meaop = leftblock_extend_oper(leftext, meaop)
        ext_measops['%s,%d' % meas] = meaop
    #reset left_tmp in conf; left_tmp should now hold the Hamiltonian and operators of phi_idx
    conf.left_tmp_reset(phi_idx, newleft, hamleft)
    #update the operators that must be kept for the next step
    for stidx in tmpoper_storage:
        up_ext, down_ext = maintain_opers[stidx]
        up_upd = update_leftblockextend_oper(newleft, up_ext, phival)
        down_upd = update_leftblockextend_oper(newleft, down_ext, phival)
        conf.left_tmp_add_oper(up_upd)
        conf.left_tmp_add_oper(down_upd)
    #save the operators that will be needed for measurements later
    leftext_stor = conf.get_leftext_storage(phi_idx-1)
    leftstorage = conf.get_leftblock_storage(phi_idx)
    for prefix, stidx in measure_storage:
        #if the site belongs to the previous block it has already been extended
        #to leftext[phi_idx-1]; save it to leftext[phi_idx-1], then
        #upgrade it to leftblock[phi_idx] and store it in left_tmp
        meaop = ext_measops['%s,%d' % (prefix, stidx)]
        leftext_stor.storage_meas(prefix, meaop)
        #upgrade
        meaop = update_leftblockextend_oper(newleft, meaop, phival)
        leftstorage.storage_meas(prefix, meaop)
    return newleft
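#A standalone restatement (sketch only) of the spin-sector restriction built inside
#leftblock_to_next above: once the left block covers more than half of the chain, the
#kept (n_up, n_down) sectors are limited to those that can still reach conf.spin_sector
#after the remaining sites are added. The parameter names model_size and spin_sector
#stand in for conf.model.size and conf.spin_sector.
def allowed_left_sectors(phi_idx, model_size, spin_sector):
    '''return None (no restriction) or the list of (n_up, n_down) sectors to keep'''
    import numpy
    if phi_idx <= numpy.floor_divide(model_size, 2):
        return None
    sectors = []
    #the remaining model_size - phi_idx sites can supply at most that many electrons
    #of each spin, so the block must already hold at least the difference
    sp1min = max(spin_sector[0] - (model_size - phi_idx), 0)
    sp2min = max(spin_sector[1] - (model_size - phi_idx), 0)
    for se1 in range(sp1min, spin_sector[0] + 1):
        for se2 in range(sp2min, spin_sector[1] + 1):
            sectors.append((se1, se2))
    return sectors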
def leftblockextend_to_next(
        conf: DMRGConfig,
        phi_idx: int,
        extrabonds: List[int],
        newbonds: List[int],
        extoper_storage: List[int],
        measure_storage: List[Tuple[str, int]],
        spin_sector,
        maxkeep: int
    ):
    '''After the rightext iteration is in place, start the iteration on the left side.\n
    Here phi_idx is the idx of the new leftblockext, the newly added site is phi_idx+1,\n
    and the idx of the corresponding rightblockext is phi_idx+2.
    '''
    leftstorage = conf.get_leftext_storage(phi_idx - 1)
    rightstorage = conf.get_rightext_storage(phi_idx + 2)
    #first assemble the superblock
    sector_idxs, mat, superext = get_superblock_ham(
        conf.model, leftstorage, rightstorage, spin_sector, extrabonds)
    #solve for the ground state
    eigvals, eigvecs = scipy.sparse.linalg.eigsh(mat, k=1, which='SA')
    #numpy.linalg.eigh(mat)
    ground = eigvecs[:, 0]
    ground_erg = eigvals[0]
    #keep the information of the ground state
    conf.ground_vec = ground
    conf.ground_secidx = sector_idxs
    conf.ground_superext = superext
    #build the density matrix
    lidxs, ridxs, mat = get_density_root(  #pylint: disable=unused-variable
        leftstorage, rightstorage, sector_idxs, ground)
    #the rows of this mat are phi^phi_idx-1,s^phi_idx and the columns are s^phi_idx+1,phi^phi_idx+2
    #lidxs maps the rows of mat to indices of the leftext basis
    #contract the columns away with a matrix multiplication
    denmat = numpy.matmul(mat, mat.transpose())
    #split the density matrix into blocks with equal particle number
    #the superblock idx is no longer needed here; we need the map from
    #phi^phi_idx-1,s^phi_idx to the leftext idx
    spsec_mat_dict = get_density_in_sector(leftstorage, lidxs, denmat)
    #obtain the phival that updates the basis
    #this diagonalizes every subblock of denmat; note that phival is finally
    #placed on the basis of the whole leftext
    phival = get_phival_from_density_sector(leftstorage, spsec_mat_dict, maxkeep)
    #now upgrade leftblockext to the ext at the new position
    #first upgrade leftblkext to left + 1
    leftext = leftstorage.block
    newleftblk = update_to_leftblock(leftext, phival)
    #then upgrade the Hamiltonian
    newham = update_leftblockextend_hamiltonian(newleftblk, leftstorage.hamiltonian, phival)
    #adjust the idx in tmp; this has no real effect
    conf.left_tmp_reset(phi_idx, newleftblk, None)
    #first upgrade the operators on newleftblk, then extend them
    maintain_dict = {}
    for extidx in leftstorage.oper_storage_list:
        extup = leftstorage.get_oper(extidx, 1)
        newup = update_leftblockextend_oper(newleftblk, extup, phival)
        extdn = leftstorage.get_oper(extidx, -1)
        newdn = update_leftblockextend_oper(newleftblk, extdn, phival)
        maintain_dict[extidx] = (newup, newdn)
    #the leftblock at phi_idx is complete; now extend it to leftblockextend
    newleftext = extend_leftblock(newleftblk)
    #extend the Hamiltonian as well
    newhamext = extend_leftblock_hamiltonian(newham, newleftext)
    #operators needed for later measurements: take them from leftext[phi_idx-1],
    #upgrade and extend them, and later put them into leftext[phi_idx]
    ext_meaops = {}
    for prefix, stidx in measure_storage:
        if stidx == phi_idx + 1:
            #operators on the newly added site do not need upgrading;
            #create a new measurement operator instead
            meaop = create_operator_of_site(newleftext.stbss,
                                            OperFactory.create_by_name(prefix))
            #extend it to leftext[phi_idx]
            meaop = leftsite_extend_oper(newleftext, meaop)
        else:
            #get meaop
            meaop = leftstorage.get_meas(prefix, stidx)
            #upgrade meaop to leftblock[phi_idx]
            meaop = update_leftblockextend_oper(newleftblk, meaop, phival)
            #extend
            meaop = leftblock_extend_oper(newleftext, meaop)
        ext_meaops['%s,%d' % (prefix, stidx)] = meaop
    #cache the current state
    conf.leftext_reset(phi_idx, newleftext)
    conf.storage_leftext_ham(phi_idx, newhamext)
    #get the two operators of the newly added site and extend them
    newsiteup = create_operator_of_site(newleftext.stbss, OperFactory.create_spinup())
    newsitedn = create_operator_of_site(newleftext.stbss, OperFactory.create_spindown())
    newsiteup = leftsite_extend_oper(newleftext, newsiteup)
    newsitedn = leftsite_extend_oper(newleftext, newsitedn)
    #also extend the operators that were just upgraded to newleftblk
    for idx in maintain_dict:
        extup, extdn = maintain_dict[idx]
        extup = leftblock_extend_oper(newleftext, extup)
        extdn = leftblock_extend_oper(newleftext, extdn)
        maintain_dict[idx] = (extup, extdn)
    #the newly added site is phi_idx + 1
    maintain_dict[phi_idx + 1] = (newsiteup, newsitedn)
    #add the new hopping terms
    for bnd in newbonds:
        coef_t = conf.model.get_t_coef(bnd, newsiteup.siteidx)
        newhamext.add_hopping_term(maintain_dict[bnd][0], newsiteup, coef_t)
        newhamext.add_hopping_term(maintain_dict[bnd][1], newsitedn, coef_t)
    #add the U term of the new site
    newiu = create_operator_of_site(newleftext.stbss, OperFactory.create_u())
    newiu = leftsite_extend_oper(newleftext, newiu)
    newhamext.add_u_term(newiu, conf.model.coef_u)
    #add the Mu term of the new site
    coef_mu = conf.model.get_coef_mu(phi_idx + 1)
    if coef_mu != 0:
        newnu = create_operator_of_site(newleftext.stbss, OperFactory.create_numup())
        newnu = leftsite_extend_oper(newleftext, newnu)
        newhamext.add_mu_term(newnu, coef_mu)
        newnd = create_operator_of_site(newleftext.stbss, OperFactory.create_numdown())
        newnd = leftsite_extend_oper(newleftext, newnd)
        newhamext.add_mu_term(newnd, coef_mu)
    #save the operators that need to be stored
    for extidx in extoper_storage:
        conf.storage_leftext_oper(phi_idx, maintain_dict[extidx][0])
        conf.storage_leftext_oper(phi_idx, maintain_dict[extidx][1])
    #save the measurement operators into leftext[phi_idx]
    leftstor_phiidx = conf.get_leftext_storage(phi_idx)
    for prefix, stidx in measure_storage:
        meaop = ext_meaops['%s,%d' % (prefix, stidx)]
        leftstor_phiidx.storage_meas(prefix, meaop)
    return ground_erg
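#A plain-numpy sketch of the truncation step performed above by get_density_root,
#get_density_in_sector and get_phival_from_density_sector. Assumptions: no quantum-number
#blocking, no fermionic signs, and a normalized dense ground-state vector. Reshape the
#ground state into a matrix whose rows live on the left part, form the reduced density
#matrix rho = M M^T, and keep the maxkeep eigenvectors with the largest weights as phival.
def truncation_sketch(ground, left_dim, right_dim, maxkeep):
    '''return (phival, truncation_error) for a dense, normalized ground-state vector'''
    import numpy
    mat = ground.reshape([left_dim, right_dim])
    #reduced density matrix of the left part
    denmat = numpy.matmul(mat, mat.transpose())
    evals, evecs = numpy.linalg.eigh(denmat)      #eigenvalues in ascending order
    keep = min(maxkeep, left_dim)
    #rows of phival are the kept states expressed on the old left basis
    phival = evecs[:, -keep:].transpose()
    trunc_err = 1.0 - numpy.sum(evals[-keep:])
    return phival, trunc_err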
def test1():
    '''test the superblock
    check that the Hamiltonian with 5 sites is correct; verify the spin-up part
    '''
    #
    left1 = first_leftblock(1)
    #only the first site exists now; create its Hamiltonian and creation operator
    hamleft = create_hamiltonian_of_site(left1.fock_basis, -1., 0)
    print('U', hamleft)
    cup_in_1 = create_operator_of_site(left1.fock_basis, OperFactory.create_spinup())
    #extend the first site to the second
    left1ext = extend_leftblock(left1)
    #C^+_1 on the extended block
    cup_in_1 = leftblock_extend_oper(left1ext, cup_in_1)
    #after the extension there is C^+_2
    cup_in_2 = create_operator_of_site(left1ext.stbss, OperFactory.create_spinup())
    cup_in_2 = leftsite_extend_oper(left1ext, cup_in_2)
    #U2 after the extension
    ciu_in_2 = create_operator_of_site(left1ext.stbss, OperFactory.create_u())
    ciu_in_2 = leftsite_extend_oper(left1ext, ciu_in_2)
    #now C^+_1 and C^+_2 are both on the left1ext basis |phi^1, s^2>; merge them into the Hamiltonian
    #first put the Hamiltonian onto the |phi^1, s^2> basis as well
    hamleft = extend_leftblock_hamiltonian(hamleft, left1ext)
    hamleft.add_hopping_term(cup_in_1, cup_in_2, 1.0)
    hamleft.add_u_term(ciu_in_2, -1.0)
    print('U', hamleft)
    #then upgrade left1ext to left2 |phi^2>
    phival = numpy.eye(16)
    left2 = update_to_leftblock(left1ext, phival)
    hamleft = update_leftblockextend_hamiltonian(left2, hamleft, phival)
    cup_in_1 = update_leftblockextend_oper(left2, cup_in_1, phival)
    cup_in_2 = update_leftblockextend_oper(left2, cup_in_2, phival)
    #extend |phi^2> to |phi^2, s^3>
    left2ext = extend_leftblock(left2)
    #extend the Hamiltonian and the second operator as well
    hamleft = extend_leftblock_hamiltonian(hamleft, left2ext)
    cup_in_2 = leftblock_extend_oper(left2ext, cup_in_2)
    #create the operator of the third site and put it onto |phi^2, s^3>
    cup_in_3 = create_operator_of_site(left2ext.stbss, OperFactory.create_spinup())
    cup_in_3 = leftsite_extend_oper(left2ext, cup_in_3)
    #put the 2-3 hopping into the Hamiltonian
    hamleft.add_hopping_term(cup_in_2, cup_in_3, 1.0)
    #this |phi^2, s^3> will be used later to build the superblock
    print(hamleft)
    print(cup_in_3)
    #
    #
    right = first_rightblock(5)
    #only the 5th site exists now
    hamright = create_hamiltonian_of_site(right.fock_basis, -1.0, 0)
    print('rU', hamright)
    cup_in_5 = create_operator_of_site(right.fock_basis, OperFactory.create_spinup())
    #extend the 5th site to the 4th |s^4, phi^1>
    rightext = extend_rightblock(right)
    #extend the operator of the 5th site
    cup_in_5 = rightblock_extend_oper(rightext, cup_in_5)
    #create the operator of the 4th site and extend it
    cup_in_4 = create_operator_of_site(rightext.stbss, OperFactory.create_spinup())
    cup_in_4 = rightsite_extend_oper(rightext, cup_in_4)
    #U on the 4th site
    ciu_in_4 = create_operator_of_site(rightext.stbss, OperFactory.create_u())
    ciu_in_4 = rightsite_extend_oper(rightext, ciu_in_4)
    #extend the Hamiltonian to |s^4, phi^1>
    hamright = extend_rightblock_hamiltonian(hamright, rightext)
    #add the 4-5 hopping
    hamright.add_hopping_term(cup_in_4, cup_in_5, 1.0)
    hamright.add_u_term(ciu_in_4, -1.0)
    print('rU', hamright)
    print(cup_in_4)
    #
    #create the superblock
    superblock = extend_merge_to_superblock(left2ext, rightext)
    print(superblock)
    #extend the left Hamiltonian onto the superblock
    hamleft = leftext_hamiltonian_to_superblock(superblock, hamleft)
    cup_in_3_e = leftext_oper_to_superblock(superblock, cup_in_3)
    print(hamleft)
    print(cup_in_3_e)
    #
    #for ridx in superblock.iter_idx():
    #    for lidx in superblock.iter_idx():
    #        if hamleft.mat[lidx, ridx] == 0:
    #            continue
    #        idx1, idx2, idx3, idx4 = superblock.idx_to_idxtuple(ridx)
    #        rsta = '%s,%s,%s,%s' %\
    #            (left1ext.idx_to_state(idx1),\
    #            superblock.leftblockextend.stbss.idx_to_state(idx2),\
    #            superblock.rightblockextend.stbss.idx_to_state(idx3),\
    #            superblock.rightblockextend.rblk.idx_to_state(idx4))
    #        idx1, idx2, idx3, idx4 = superblock.idx_to_idxtuple(lidx)
    #        lsta = '%s,%s,%s,%s' %\
    #            (left1ext.idx_to_state(idx1),\
    #            superblock.leftblockextend.stbss.idx_to_state(idx2),\
    #            superblock.rightblockextend.stbss.idx_to_state(idx3),\
    #            superblock.rightblockextend.rblk.idx_to_state(idx4))
    #        print(rsta, lsta,
    #            hamleft.mat[lidx, ridx])
    #for ridx in superblock.iter_idx():
    #    for lidx in superblock.iter_idx():
    #        if cup_in_3.mat[lidx, ridx] == 0:
    #            continue
    #        idx1, idx2, idx3, idx4 = superblock.idx_to_idxtuple(ridx)
    #        rsta = '%s,%s,%s,%s' %\
    #            (left1ext.idx_to_state(idx1),\
    #            superblock.leftblockextend.stbss.idx_to_state(idx2),\
    #            superblock.rightblockextend.stbss.idx_to_state(idx3),\
    #            superblock.rightblockextend.rblk.idx_to_state(idx4))
    #        idx1, idx2, idx3, idx4 = superblock.idx_to_idxtuple(lidx)
    #        lsta = '%s,%s,%s,%s' %\
    #            (left1ext.idx_to_state(idx1),\
    #            superblock.leftblockextend.stbss.idx_to_state(idx2),\
    #            superblock.rightblockextend.stbss.idx_to_state(idx3),\
    #            superblock.rightblockextend.rblk.idx_to_state(idx4))
    #        print(rsta, lsta, cup_in_3.mat[lidx, ridx])
    #
    #extend the right Hamiltonian onto the superblock
    hamright = rightext_hamiltonian_to_superblock(superblock, hamright)
    print(hamright)
    #for ridx in superblock.iter_idx():
    #    for lidx in superblock.iter_idx():
    #        if hamright.mat[lidx, ridx] == 0:
    #            continue
    #        idx1, idx2, idx3, idx4 = superblock.idx_to_idxtuple(ridx)
    #        rsta = '%s,%s,%s,%s' %\
    #            (left1ext.idx_to_state(idx1),\
    #            superblock.leftblockextend.stbss.idx_to_state(idx2),\
    #            superblock.rightblockextend.stbss.idx_to_state(idx3),\
    #            superblock.rightblockextend.rblk.idx_to_state(idx4))
    #        idx1, idx2, idx3, idx4 = superblock.idx_to_idxtuple(lidx)
    #        lsta = '%s,%s,%s,%s' %\
    #            (left1ext.idx_to_state(idx1),\
    #            superblock.leftblockextend.stbss.idx_to_state(idx2),\
    #            superblock.rightblockextend.stbss.idx_to_state(idx3),\
    #            superblock.rightblockextend.rblk.idx_to_state(idx4))
    #        print(rsta, lsta, hamright.mat[lidx, ridx])
    #
    #extend the right operator onto the superblock
    cup_in_4_e = rightext_oper_to_superblock(superblock, cup_in_4)
    print(cup_in_4_e)
    #for ridx in superblock.iter_idx():
    #    for lidx in superblock.iter_idx():
    #        if cup_in_4.mat[lidx, ridx] == 0:
    #            continue
    #        idx1, idx2, idx3, idx4 = superblock.idx_to_idxtuple(ridx)
    #        rsta = '%s,%s,%s,%s' %\
    #            (left1ext.idx_to_state(idx1),\
    #            superblock.leftblockextend.stbss.idx_to_state(idx2),\
    #            superblock.rightblockextend.stbss.idx_to_state(idx3),\
    #            superblock.rightblockextend.rblk.idx_to_state(idx4))
    #        idx1, idx2, idx3, idx4 = superblock.idx_to_idxtuple(lidx)
    #        lsta = '%s,%s,%s,%s' %\
    #            (left1ext.idx_to_state(idx1),\
    #            superblock.leftblockextend.stbss.idx_to_state(idx2),\
    #            superblock.rightblockextend.stbss.idx_to_state(idx3),\
    #            superblock.rightblockextend.rblk.idx_to_state(idx4))
    #        print(rsta, lsta, cup_in_4.mat[lidx, ridx])
    hamsuper = plus_two_hamiltonian(hamleft, hamright)
    res1 = hamsuper.add_hopping_term(cup_in_3_e, cup_in_4_e, 1.0)
    res2 = hamsuper.superblock_add_hopping_term(cup_in_3, cup_in_4, 1.0)
    print('superblock_add_hopping_term: ', numpy.allclose(res1, res2))
    print(hamsuper)
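#A toy check (sketch only, fermionic sign handling ignored) of the embedding structure
#behind leftext_oper_to_superblock / rightext_oper_to_superblock used in the test above:
#an operator A on the left basis becomes kron(A, I), an operator B on the right becomes
#kron(I, B), and their product is kron(A, B), which is what a hopping term couples.
#All matrices below are made-up dense toys, not objects from this package.
def _superblock_embedding_sketch():
    '''return True if kron(A, I) @ kron(I, B) equals kron(A, B) for random toy matrices'''
    import numpy
    rng = numpy.random.default_rng(0)
    amat = rng.standard_normal((4, 4))     #toy operator on the left block
    bmat = rng.standard_normal((3, 3))     #toy operator on the right block
    a_embed = numpy.kron(amat, numpy.eye(3))
    b_embed = numpy.kron(numpy.eye(4), bmat)
    return numpy.allclose(numpy.matmul(a_embed, b_embed), numpy.kron(amat, bmat))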
def test2():
    '''test the superblock
    check that the Hamiltonian with 5 sites is correct; verify the spin-down part
    '''
    #
    left1 = first_leftblock(1)
    #only the first site exists now; create its Hamiltonian and creation operator
    hamleft = create_hamiltonian_of_site(left1.fock_basis, 0, 0)
    cdn_in_1 = create_operator_of_site(left1.fock_basis, OperFactory.create_spindown())
    #extend the first site to the second
    left1ext = extend_leftblock(left1)
    #C^+_1 on the extended block
    cdn_in_1 = leftblock_extend_oper(left1ext, cdn_in_1)
    #after the extension there is C^+_2
    cdn_in_2 = create_operator_of_site(left1ext.stbss, OperFactory.create_spindown())
    cdn_in_2 = leftsite_extend_oper(left1ext, cdn_in_2)
    #now C^+_1 and C^+_2 are both on the left1ext basis |phi^1, s^2>; merge them into the Hamiltonian
    #first put the Hamiltonian onto the |phi^1, s^2> basis as well
    hamleft = extend_leftblock_hamiltonian(hamleft, left1ext)
    hamleft.add_hopping_term(cdn_in_1, cdn_in_2, 1.0)
    #then upgrade left1ext to left2 |phi^2>
    phival = numpy.eye(16)
    left2 = update_to_leftblock(left1ext, phival)
    hamleft = update_leftblockextend_hamiltonian(left2, hamleft, phival)
    cdn_in_1 = update_leftblockextend_oper(left2, cdn_in_1, phival)
    cdn_in_2 = update_leftblockextend_oper(left2, cdn_in_2, phival)
    #extend |phi^2> to |phi^2, s^3>
    left2ext = extend_leftblock(left2)
    #extend the Hamiltonian and the second operator as well
    hamleft = extend_leftblock_hamiltonian(hamleft, left2ext)
    cdn_in_2 = leftblock_extend_oper(left2ext, cdn_in_2)
    #create the operator of the third site and put it onto |phi^2, s^3>
    cdn_in_3 = create_operator_of_site(left2ext.stbss, OperFactory.create_spindown())
    cdn_in_3._mat = numpy.random.randn(cdn_in_3.mat.shape[0], cdn_in_3.mat.shape[1])
    cdn_in_3 = leftsite_extend_oper(left2ext, cdn_in_3)
    #put the 2-3 hopping into the Hamiltonian
    hamleft.add_hopping_term(cdn_in_2, cdn_in_3, 1.0)
    #this |phi^2, s^3> will be used later to build the superblock
    print(hamleft)
    print(cdn_in_3)
    #
    #
    right = first_rightblock(5)
    #only the 5th site exists now
    hamright = create_hamiltonian_of_site(right.fock_basis, 0, 0)
    cdn_in_5 = create_operator_of_site(right.fock_basis, OperFactory.create_spindown())
    #extend the 5th site to the 4th |s^4, phi^1>
    rightext = extend_rightblock(right)
    #extend the operator of the 5th site
    cdn_in_5 = rightblock_extend_oper(rightext, cdn_in_5)
    #create the operator of the 4th site and extend it
    cdn_in_4 = create_operator_of_site(rightext.stbss, OperFactory.create_spindown())
    #Issue #16: use a random matrix to verify the correctness of the optimization
    cdn_in_4._mat = numpy.random.randn(cdn_in_4.mat.shape[0], cdn_in_4.mat.shape[1])
    cdn_in_4 = rightsite_extend_oper(rightext, cdn_in_4)
    #extend the Hamiltonian to |s^4, phi^1>
    hamright = extend_rightblock_hamiltonian(hamright, rightext)
    #add the 4-5 hopping
    hamright.add_hopping_term(cdn_in_4, cdn_in_5, 1.0)
    print(hamright)
    print(cdn_in_4)
    #
    #create the superblock
    superblock = extend_merge_to_superblock(left2ext, rightext)
    print(superblock)
    #extend the left Hamiltonian onto the superblock
    hamleft = leftext_hamiltonian_to_superblock(superblock, hamleft)
    cdn_in_3_e = leftext_oper_to_superblock(superblock, cdn_in_3)
    print(hamleft)
    print(cdn_in_3_e)
    #extend the right Hamiltonian onto the superblock
    hamright = rightext_hamiltonian_to_superblock(superblock, hamright)
    print(hamright)
    #extend the right operator onto the superblock
    cdn_in_4_e = rightext_oper_to_superblock(superblock, cdn_in_4)
    print(cdn_in_4_e)
    #
    hamsuper = plus_two_hamiltonian(hamleft, hamright)
    res1 = hamsuper.add_hopping_term(cdn_in_3_e, cdn_in_4_e, 1.0)
    res2 = hamsuper.superblock_add_hopping_term(cdn_in_3, cdn_in_4, 1.0)
    print('superblock_add_hopping_term: ', numpy.allclose(res1, res2))
    print(hamsuper)