def bc_cid(ci_vector, clustered_ham, thresh_cipsi=1e-4, thresh_conv=1e-8, n_roots=1):
# {{{
    print(" Compute diagonal elements", flush=True)
    # compute local states energies
    precompute_cluster_basis_energies(clustered_ham)
    print(" done.", flush=True)

    pt_vector = ci_vector.copy()
    Hd_vector = ClusteredState(ci_vector.clusters)

    print()
    print(" ===================================================================")
    print(" CID epsilon: %12.8f" % (thresh_cipsi))
    print(" ===================================================================")
    print(" Build full Hamiltonian", flush=True)
    H = build_full_hamiltonian(clustered_ham, ci_vector)

    print(" Diagonalize Hamiltonian Matrix:", flush=True)
    vguess = ci_vector.get_vector()
    if H.shape[0] > 100 and abs(np.sum(vguess)) > 0:
        e, v = scipy.sparse.linalg.eigsh(H, n_roots, v0=vguess, which='SA')
    else:
        e, v = np.linalg.eigh(H)
    idx = e.argsort()
    e = e[idx]
    v = v[:, idx]
    v0 = v[:, 0]
    e0 = e[0]
    print(" Ground state of CI: %12.8f CI Dim: %4i " % (e[0].real, len(ci_vector)))
    ci_vector.zero()
    ci_vector.set_vector(v0)

    ci_dim = len(ci_vector)
    return ci_vector, e0
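
# ---------------------------------------------------------------------------
# Added illustration (not called by anything above): the eigsh/eigh branch used
# in bc_cid, exercised on a random symmetric matrix so it can be run standalone.
# The 100-row cutoff, the v0 guess, and which='SA' mirror the code above; the
# matrix and guess vector here are made up.
# ---------------------------------------------------------------------------
def _example_guess_vector_diagonalization():
    import numpy as np
    import scipy.sparse.linalg

    np.random.seed(1)
    A = np.random.rand(200, 200)
    H_demo = 0.5 * (A + A.T)                    # stand-in symmetric "Hamiltonian"
    vguess = np.ones(200) / np.sqrt(200)        # normalized guess vector

    if H_demo.shape[0] > 100 and abs(np.sum(vguess)) > 0:
        # ARPACK (Lanczos-type) iterative solver for the lowest ('SA') root, seeded with vguess
        e, v = scipy.sparse.linalg.eigsh(H_demo, 1, v0=vguess, which='SA')
    else:
        e, v = np.linalg.eigh(H_demo)
    print(" Lowest eigenvalue (demo): %12.8f" % e[0])
    return e[0]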
                             mol.nelectron, ecore=0, verbose=4)
hci_dim = civec[0].shape[0]
print(" HCI: %12.8f Dim:%6d" % (ehci, hci_dim))
print("HCI %10.8f" % (ehci + enu))

blocks = [[0, 1, 2], [3, 4, 5]]
#blocks = [[0,1],[2,3],[4,5],[6,7]]
n_blocks = len(blocks)

clusters = []
for ci, c in enumerate(blocks):
    clusters.append(Cluster(ci, c))

ci_vector = ClusteredState(clusters)
ci_vector.init(((3, 3), (0, 0)))
ci_vector.init(((2, 2), (1, 1)))
ci_vector.init(((1, 3), (2, 0)))
ci_vector.init(((3, 1), (0, 2)))
ci_vector.init(((3, 2), (0, 1)))
ci_vector.init(((2, 3), (1, 0)))
#ci_vector.init(((1,1),(1,1),(0,0),(0,0)))

print(" Clusters:")
[print(ci) for ci in clusters]

clustered_ham = ClusteredOperator(clusters)
print(" Add 1-body terms")
clustered_ham.add_1b_terms(h)
print(" Add 2-body terms")
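
# ---------------------------------------------------------------------------
# Added illustration (standalone, not used above): the size of the tensor-product
# space spanned by one fock-sector assignment such as ((3, 3), (0, 0)) is the
# product of binomial coefficients C(n_orb, n_alpha)*C(n_orb, n_beta) over the
# clusters; this is what calc_nchk is used for in do_2body_search below.
# ---------------------------------------------------------------------------
def _example_tps_dimension():
    from math import comb
    blocks_demo = [[0, 1, 2], [3, 4, 5]]
    fspace_demo = ((3, 3), (0, 0))
    dim = 1
    for block, (na, nb) in zip(blocks_demo, fspace_demo):
        dim *= comb(len(block), na) * comb(len(block), nb)
    # both clusters here are closed sectors: C(3,3)^2 * C(3,0)^2 = 1
    assert dim == 1
    return dim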
def do_2body_search(blocks, init_fspace, h, g, max_cluster_size=4, max_iter_cmf=10, do_pt2=True):
    """
    Sort the cluster pairs based on how much correlation energy is recovered when combined
    """
# {{{
    dimer_energies = {}
    init_dim = 1
    clusters = []
    for ci, c in enumerate(blocks):
        clusters.append(Cluster(ci, c))
    for ci, c in enumerate(clusters):
        init_dim = init_dim * calc_nchk(c.n_orb, init_fspace[ci][0])
        init_dim = init_dim * calc_nchk(c.n_orb, init_fspace[ci][1])

    for i in range(len(blocks)):
        for j in range(i+1, len(blocks)):
            if len(blocks[i]) + len(blocks[j]) > max_cluster_size:
                continue
            new_block = []
            new_block.extend(blocks[i])
            new_block.extend(blocks[j])
            new_block = sorted(new_block)
            new_blocks = [new_block]
            new_init_fspace = [(init_fspace[i][0]+init_fspace[j][0], init_fspace[i][1]+init_fspace[j][1])]
            for k in range(len(blocks)):
                if k != i and k != j:
                    new_blocks.append(blocks[k])
                    new_init_fspace.append(init_fspace[k])
            new_init_fspace = tuple(new_init_fspace)

            new_clusters = []
            for ci, c in enumerate(new_blocks):
                new_clusters.append(Cluster(ci, c))

            new_ci_vector = ClusteredState()
            new_ci_vector.init(new_clusters, new_init_fspace)

            # unless doing PT2, make sure new dimension is greater than 1
            if do_pt2 == False:
                dim = 1
                for ci, c in enumerate(new_clusters):
                    dim = dim * calc_nchk(c.n_orb, new_init_fspace[ci][0])
                    dim = dim * calc_nchk(c.n_orb, new_init_fspace[ci][1])
                if dim <= init_dim:
                    continue

            print(" Clusters:")
            [print(ci) for ci in new_clusters]

            new_clustered_ham = ClusteredOperator(new_clusters)
            print(" Add 1-body terms")
            new_clustered_ham.add_1b_terms(cp.deepcopy(h))
            print(" Add 2-body terms")
            new_clustered_ham.add_2b_terms(cp.deepcopy(g))
            #clustered_ham.combine_common_terms(iprint=1)

            # Get CMF reference
            print(" Let's do CMF for blocks %4i:%-4i" % (i, j))
            e_curr, converged = cmf(new_clustered_ham, new_ci_vector, cp.deepcopy(h), cp.deepcopy(g),
                                    max_iter=10, max_nroots=1)

            if do_pt2:
                e2, v = compute_pt2_correction(new_ci_vector, new_clustered_ham, e_curr)
                print(" PT2 Energy Total = %12.8f" % (e_curr+e2))
                e_curr += e2

            print(" Pairwise-CMF(%i,%i) Energy = %12.8f" % (i, j, e_curr))
            dimer_energies[(i, j)] = e_curr

    import operator
    dimer_energies = OrderedDict(sorted(dimer_energies.items(), key=lambda x: x[1]))
    for d in dimer_energies:
        print(" || %10s | %12.8f" % (d, dimer_energies[d]))

    pairs = list(dimer_energies.keys())
    if len(pairs) == 0:
        return blocks, init_fspace
    #target_pair = next(iter(dimer_energies))
    target_pair = pairs[0]

    i = target_pair[0]
    j = target_pair[1]
    print(target_pair)
    new_block = []
    new_block.extend(blocks[i])
    new_block.extend(blocks[j])
    new_blocks = [new_block]
    new_init_fspace = [(init_fspace[i][0]+init_fspace[j][0], init_fspace[i][1]+init_fspace[j][1])]
    for k in range(len(blocks)):
        if k != i and k != j:
            new_blocks.append(blocks[k])
            new_init_fspace.append(init_fspace[k])

    print(" This is the new clustering")
    print(" | %12.8f" % dimer_energies[(i, j)], new_blocks)
    new_init_fspace = tuple(new_init_fspace)
    print(new_init_fspace)
    print()
    return new_blocks, new_init_fspace
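
# ---------------------------------------------------------------------------
# Added illustration (not part of do_2body_search): how the cluster pairs are
# ranked once their pairwise-CMF energies are collected.  The pair energies
# below are made-up numbers; the point is the OrderedDict(sorted(...)) idiom
# that puts the most negative (most correlated) pair first.
# ---------------------------------------------------------------------------
def _example_rank_cluster_pairs():
    from collections import OrderedDict
    dimer_energies = {(0, 1): -1.02, (0, 2): -0.98, (1, 2): -1.10}
    ranked = OrderedDict(sorted(dimer_energies.items(), key=lambda x: x[1]))
    for pair, e in ranked.items():
        print(" || %10s | %12.8f" % (pair, e))
    target_pair = next(iter(ranked))    # lowest-energy pair, here (1, 2)
    return target_pair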
def run_hierarchical_sci(h, g, blocks, init_fspace, dimer_threshold, ecore):
    """
    compute a dimer calculation and figure out what states to retain for a larger calculation
    """
# {{{
    fclusters = []
    findex_list = {}
    for ci, c in enumerate(blocks):
        fclusters.append(Cluster(ci, c))
        #findex_list[c] = {}
    n_blocks = len(blocks)

    for ca in range(0, n_blocks):
        for cb in range(ca+1, n_blocks):
            f_idx = [ca, cb]
            s_blocks = [blocks[ca], blocks[cb]]
            s_fspace = ((init_fspace[ca]), (init_fspace[cb]))

            print("Blocks:", ca, cb)
            print(s_blocks)
            print(s_fspace)

            idx = [j for sub in s_blocks for j in sub]
            # h2
            h2 = h[:, idx]
            h2 = h2[idx, :]
            print(h2)
            g2 = g[:, :, :, idx]
            g2 = g2[:, :, idx, :]
            g2 = g2[:, idx, :, :]
            g2 = g2[idx, :, :, :]

            # do not want clusters to be weirdly indexed.
            print(len(s_blocks[0]))
            print(len(s_blocks[1]))
            s_blocks = [range(0, len(s_blocks[0])), range(len(s_blocks[0]), len(s_blocks[0])+len(s_blocks[1]))]
            s_clusters = []
            for ci, c in enumerate(s_blocks):
                s_clusters.append(Cluster(ci, c))

            # Cluster States initial guess
            ci_vector = ClusteredState()
            ci_vector.init(s_clusters, (s_fspace))
            ci_vector.print_configs()
            print(" Clusters:")
            [print(ci) for ci in s_clusters]

            # Clustered Hamiltonian
            clustered_ham = ClusteredOperator(s_clusters)
            print(" Add 1-body terms")
            clustered_ham.add_1b_terms(h2)
            print(" Add 2-body terms")
            clustered_ham.add_2b_terms(g2)

            do_cmf = 0
            if do_cmf:
                # Get CMF reference
                cmf(clustered_ham, ci_vector, h2, g2, max_iter=10, max_nroots=50)
            else:
                # Get vacuum reference
                for ci_idx, ci in enumerate(s_clusters):
                    print()
                    print(" Form basis by diagonalizing local Hamiltonian for cluster: ", ci_idx)
                    ci.form_eigbasis_from_ints(h2, g2, max_roots=50)
                    print(" Build these local operators")
                    print(" Build mats for cluster ", ci.idx)
                    ci.build_op_matrices()

            ci_vector.expand_to_full_space()
            H = build_full_hamiltonian(clustered_ham, ci_vector)
            vguess = ci_vector.get_vector()
            #e,v = scipy.sparse.linalg.eigsh(H,1,v0=vguess,which='SA')
            e, v = scipy.sparse.linalg.eigsh(H, 1, which='SA')
            idx = e.argsort()
            e = e[idx]
            v = v[:, idx]
            v0 = v[:, 0]
            e0 = e[0]
            print(" Ground state of CI: %12.8f CI Dim: %4i " % (e[0].real, len(ci_vector)))
            ci_vector.zero()
            ci_vector.set_vector(v0)
            ci_vector.print_configs()

            for fspace, configs in ci_vector.data.items():
                for ci_idx, ci in enumerate(s_clusters):
                    print("fspace", fspace[ci_idx])
                    print("ci basis old\n", ci.basis[fspace[ci_idx]])
                    vec = ci.basis[fspace[ci_idx]]
                    #print(configs.items())
                    idx = []
                    for configi, coeffi in configs.items():
                        #print(configi[ci_idx],coeffi)
                        if abs(coeffi) > dimer_threshold:
                            if configi[ci_idx] not in idx:
                                idx.append(configi[ci_idx])
                    print("IDX of Cluster")
                    print(ci_idx, f_idx[ci_idx])
                    print(idx)
                    try:
                        findex_list[f_idx[ci_idx], fspace[ci_idx]] = sorted(list(set(findex_list[f_idx[ci_idx], fspace[ci_idx]]) | set(idx)))
                        #ci.cs_idx[fspace[ci_idx]] = sorted(list(set(ci.cs_idx[fspace[ci_idx]]) | set(idx)))
                    except:
                        #print(findex_list[ci_idx][fspace[ci_idx]])
                        findex_list[f_idx[ci_idx], fspace[ci_idx]] = sorted(idx)
                        #ci.cs_idx[fspace[ci_idx]] = sorted(idx)

                    ### TODO
                    # first : these indices should be saved in the fcluster object, not the s_cluster, so that needs to change.
                    # second: the rest of the code in this block should move outside the pair loop. Loop over fspace,
                    #         look at the indices kept for each fspace (vec is also in fspace), and then prune it.
                    print(vec.shape)
                    print(findex_list[f_idx[ci_idx], fspace[ci_idx]])
                    vec = vec[:, findex_list[f_idx[ci_idx], fspace[ci_idx]]]
                    #vec = vec[:,idx]
                    print("ci basis new\n")
                    print(vec)
                    fclusters[f_idx[ci_idx]].basis[fspace[ci_idx]] = vec
                    #print(ci.basis[fspace[ci_idx]])

    print("Fock indices")
    print(findex_list)

    for ci_idx, ci in enumerate(fclusters):
        ci.build_op_matrices()
        #print(findex_list[ci_idx])

    print(" *====================================================================*")
    print(" |        Tensor Product Selected Configuration Interaction          |")
    print(" *====================================================================*")

    # Cluster States initial guess
    ci_vector = ClusteredState()
    ci_vector.init(fclusters, (init_fspace))
    print(" Clusters:")
    [print(ci) for ci in fclusters]

    # Clustered Hamiltonian
    clustered_ham = ClusteredOperator(fclusters)
    print(" Add 1-body terms")
    clustered_ham.add_1b_terms(h)
    print(" Add 2-body terms")
    clustered_ham.add_2b_terms(g)

    ci_vector, pt_vector, etci, etci2, l = tpsci_tucker(ci_vector.copy(), clustered_ham,
                                                        thresh_cipsi=1e-6, thresh_ci_clip=5e-4, asci_clip=0)
    #ci_vector, pt_vector, etci, etci2 = tp_cipsi(ci_vector.copy(), clustered_ham, thresh_cipsi=1e-10, thresh_ci_clip=5e-6, asci_clip=0.01)

    print(" TPSCI:    %12.8f Dim:%6d" % (etci+ecore, len(ci_vector)))
    print(" TPSCI(2): %12.8f Dim:%6d" % (etci2+ecore, len(pt_vector)))
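
# ---------------------------------------------------------------------------
# Added illustration (not part of run_hierarchical_sci): slicing the one- and
# two-electron integrals down to the orbitals of a chosen pair of blocks, as is
# done above for h2/g2.  Random arrays stand in for the real h and g.
# ---------------------------------------------------------------------------
def _example_integral_subblocks():
    import numpy as np
    n = 6
    h = np.random.rand(n, n)
    g = np.random.rand(n, n, n, n)
    idx = [0, 1, 3, 4]                     # orbitals belonging to the chosen blocks
    h2 = h[:, idx][idx, :]                 # same result as h[np.ix_(idx, idx)]
    g2 = g[:, :, :, idx][:, :, idx, :][:, idx, :, :][idx, :, :, :]
    assert h2.shape == (4, 4)
    assert g2.shape == (4, 4, 4, 4)
    return h2, g2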
def extrapolate_pt2_correction(ci_vector, clustered_ham, e0,
        nsteps=10, start=1, stop=1e-4,
        thresh_search=0, pt_type='en', scale='log',
        matvec=3, nproc=None):
# {{{
    print()
    print(" Extrapolate PT2 Correction")
    print(" |pt_type : ", pt_type)
    print(" |thresh_search : ", thresh_search)
    print(" |start : ", start)
    print(" |stop : ", stop)
    print(" |nsteps : ", nsteps)
    print(" |scale : ", scale)
    print(" NYI!")
    exit()
    print(" E0: ", e0)

    if scale == 'log':
        stepsize = np.log((start - stop)/nsteps)
        #stepsize = -(np.log(start) - np.log(stop))/nsteps
        print(" Stepsize: ", stepsize)
        asci1 = start
        asci2 = start*np.exp(stepsize)
        steps = []
        for asci_iter in range(nsteps):
            steps.append((asci1, asci2))
            asci1 *= np.exp(stepsize)
            asci2 *= np.exp(stepsize)
    else:
        print("NYI")
        exit()

    pt_vector = ClusteredState()
    count = 0
    for asci1, asci2 in steps:
        asci_v = ci_vector.copy()
        asci_v.clip(asci2, max=asci1)
        count += len(asci_v)
        print(" Collect configs between %12.2e and %12.2e: Size: %7i Norm: %12.8f" % (asci1, asci2, len(asci_v), asci_v.norm()))
        if len(asci_v) == 0:
            continue

        print(" Compute Matrix Vector Product:", flush=True)
        start = time.time()
        if matvec == 1:
            pt_vector_curr = matvec1_parallel1(clustered_ham, asci_v, nproc=nproc, thresh_search=thresh_search)
        elif matvec == 2:
            pt_vector_curr = matvec1_parallel2(clustered_ham, asci_v, nproc=nproc, thresh_search=thresh_search)
        elif matvec == 3:
            pt_vector_curr = matvec1_parallel3(clustered_ham, asci_v, nproc=nproc, thresh_search=thresh_search)
        elif matvec == 4:
            pt_vector_curr = matvec1_parallel4(clustered_ham, asci_v, nproc=nproc, thresh_search=thresh_search)
        else:
            print(" wrong matvec")
            exit()
        #pt_vector_curr = matvec1_parallel3(clustered_ham, asci_v, nproc=nproc, thresh_search=thresh_search)
        pt_vector.add(pt_vector_curr)
        stop = time.time()
        print(" Time spent in matvec: %12.2f" % (stop-start))

        e0_curr = ci_vector.dot(pt_vector)
        print(" Zeroth-order energy: %12.8f " % e0_curr)

        pt_vector.prune_empty_fock_spaces()

        tmp = ci_vector.dot(pt_vector)
        var = pt_vector.dot(pt_vector) - tmp*tmp
        print(" Variance Subspace: %12.8f" % var, flush=True)

        print(" Remove CI space from pt_vector vector")
        for fockspace, configs in pt_vector.items():
            if fockspace in ci_vector.fblocks():
                for config, coeff in list(configs.items()):
                    if config in ci_vector[fockspace]:
                        del pt_vector[fockspace][config]

        for fockspace, configs in ci_vector.items():
            if fockspace in pt_vector:
                for config, coeff in configs.items():
                    assert(config not in pt_vector[fockspace])

        print(" Norm of CI vector = %12.8f" % ci_vector.norm())
        print(" Dimension of CI space: ", len(ci_vector))
        print(" Dimension of PT space: ", len(pt_vector))
        if len(pt_vector) == 0:
            print("No more connecting config found")
            break

        print(" Compute Denominator", flush=True)
        #exit()
        pt_vector.prune_empty_fock_spaces()

        # Build Denominator
        if pt_type == 'en':
            start = time.time()
            if nproc == 1:
                Hd = update_hamiltonian_diagonal(clustered_ham, pt_vector, Hd_vector)
            else:
                Hd = build_hamiltonian_diagonal_parallel1(clustered_ham, pt_vector, nproc=nproc)
            end = time.time()
            print(" Time spent in denominator: %12.2f" % (end - start), flush=True)
            denom = 1/(e0 - Hd)
        elif pt_type == 'mp':
            start = time.time()
            # get barycentric MP zeroth-order energy
            e0_mp = 0
            for f, c, v in ci_vector:
                for ci in clustered_ham.clusters:
                    e0_mp += ci.ops['H_mf'][(f[ci.idx], f[ci.idx])][c[ci.idx], c[ci.idx]] * v * v
            print(" Zeroth-order MP energy: %12.8f" % e0_mp, flush=True)

            # This is not really MP once we have rotated away from the CMF basis.
            # H = F + (H - F), where F = sum_I F(I)
            #
            # After the Tucker basis change, we just use the diagonal of this Fock operator.
            # Not ideal perhaps, but better than nothing at this stage.
            denom = np.zeros(len(pt_vector))
            idx = 0
            for f, c, v in pt_vector:
                e0_X = 0
                for ci in clustered_ham.clusters:
                    e0_X += ci.ops['H_mf'][(f[ci.idx], f[ci.idx])][c[ci.idx], c[ci.idx]]
                denom[idx] = 1/(e0_mp - e0_X)
                idx += 1
            end = time.time()
            print(" Time spent in denominator: %12.2f" % (end - start), flush=True)

        pt_vector_v = pt_vector.get_vector()
        pt_vector_v.shape = (pt_vector_v.shape[0])

        e2 = np.multiply(denom, pt_vector_v)
        e2 = np.dot(pt_vector_v, e2)

        ecore = clustered_ham.core_energy
        print(" PT2 Energy Correction = %12.8f" % e2)
        print(" PT2 Energy Total = %12.8f" % (e0+e2+ecore))

    assert(count == len(ci_vector))
    return e2, pt_vector
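
# ---------------------------------------------------------------------------
# Added illustration (not used by extrapolate_pt2_correction, which is marked
# NYI above): building log-spaced (upper, lower) threshold windows between
# `start` and `stop`.  This follows the commented-out stepsize expression in
# the function above, i.e. a uniform step in log space.
# ---------------------------------------------------------------------------
def _example_log_threshold_windows(start=1.0, stop=1e-4, nsteps=10):
    import numpy as np
    stepsize = -(np.log(start) - np.log(stop)) / nsteps   # negative: thresholds shrink
    asci1 = start
    asci2 = start * np.exp(stepsize)
    steps = []
    for _ in range(nsteps):
        steps.append((asci1, asci2))
        asci1 *= np.exp(stepsize)
        asci2 *= np.exp(stepsize)
    # the last window's lower edge lands on `stop` (up to round-off)
    assert abs(steps[-1][1] - stop) < 1e-10 * stop
    return steps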
def rotate(self, Kpq):
    Kpq = Kpq.reshape(self.h.shape[0], self.h.shape[1])

    # remove frozen rotations
    for freeze in self.to_freeze:
        for bi in freeze:
            for bj in freeze:
                for bii in self.blocks[bi]:
                    for bjj in self.blocks[bj]:
                        Kpq[bii, bjj] = 0

    h = self.h
    g = self.g
    C = self.C
    blocks = self.blocks
    init_fspace = self.init_fspace
    clustered_ham = self.clustered_ham
    ci_vector = self.ci_vector
    cmf_dm_guess = self.cmf_dm_guess

    from scipy.sparse.linalg import expm
    U = expm(Kpq)   # form unitary
    C = C @ U       # rotate coeff
    h = U.T @ h @ U
    print(h)
    g = np.einsum("pqrs,pl->lqrs", g, U)
    g = np.einsum("lqrs,qm->lmrs", g, U)
    g = np.einsum("lmrs,rn->lmns", g, U)
    g = np.einsum("lmns,so->lmno", g, U)
    cmf_dm_guess = (U.T @ self.cmf_dm_guess[0] @ U, U.T @ self.cmf_dm_guess[1] @ U)

    n_blocks = len(blocks)
    clusters = [Cluster(ci, c) for ci, c in enumerate(blocks)]

    print(" Clusters:")
    [print(ci) for ci in clusters]

    clustered_ham = ClusteredOperator(clusters, core_energy=self.ecore)
    print(" Add 1-body terms")
    clustered_ham.add_local_terms()
    clustered_ham.add_1b_terms(h)
    print(" Add 2-body terms")
    clustered_ham.add_2b_terms(g)

    ci_vector = ClusteredState()
    ci_vector.init(clusters, init_fspace)

    if self.cmf_dm_guess == None:
        e_curr, converged, rdm_a, rdm_b = cmf(clustered_ham, ci_vector, h, g,
                                              max_iter=20, thresh=self.cmf_ci_thresh)
    if self.cmf_dm_guess != None:
        e_curr, converged, rdm_a, rdm_b = cmf(clustered_ham, ci_vector, h, g,
                                              diis=True, dm_guess=cmf_dm_guess,
                                              max_iter=20, thresh=self.cmf_ci_thresh)

    # build cluster basis and operator matrices using CMF optimized density matrices
    for ci_idx, ci in enumerate(clustered_ham.clusters):
        #if delta_elec != None:
        #    fspaces_i = init_fspace[ci_idx]
        #    fspaces_i = ci.possible_fockspaces( delta_elec=(fspaces_i[0], fspaces_i[1], delta_elec) )
        #else:
        fspaces_i = ci.possible_fockspaces()

        print()
        print(" Form basis by diagonalizing local Hamiltonian for cluster: ", ci_idx)
        ci.form_fockspace_eigbasis(h, g, fspaces_i, max_roots=self.max_roots,
                                   rdm1_a=rdm_a, rdm1_b=rdm_b, ecore=self.ecore)

        print(" Build operator matrices for cluster ", ci.idx)
        ci.build_op_matrices()
        ci.build_local_terms(h, g)

    self.h = h
    self.g = g
    self.C = C
    self.clustered_ham = clustered_ham
    self.ci_vector = ci_vector
    self.cmf_dm_guess = cmf_dm_guess
def init(self, cmf_dm_guess=None):
    if cmf_dm_guess != None:
        self.cmf_dm_guess = cmf_dm_guess
    h = self.h
    g = self.g
    C = self.C
    blocks = self.blocks
    init_fspace = self.init_fspace

    if self.do_intra_rots == False:
        # freeze intra-block rotations
        for bi in range(len(self.blocks)):
            self.freeze_cluster_mixing(bi, (bi,))

    n_blocks = len(blocks)
    clusters = [Cluster(ci, c) for ci, c in enumerate(blocks)]

    print(" Ecore :%16.8f" % self.ecore)
    print(" Clusters:")
    [print(ci) for ci in clusters]

    clustered_ham = ClusteredOperator(clusters, core_energy=self.ecore)
    print(" Add 1-body terms")
    clustered_ham.add_local_terms()
    clustered_ham.add_1b_terms(h)
    print(" Add 2-body terms")
    clustered_ham.add_2b_terms(g)

    ci_vector = ClusteredState()
    ci_vector.init(clusters, init_fspace)

    self.clustered_ham = clustered_ham
    self.ci_vector = ci_vector

    if self.cmf_dm_guess == None:
        e_curr, converged, rdm_a, rdm_b = cmf(clustered_ham, ci_vector, h, g,
                                              max_iter=20, thresh=self.cmf_ci_thresh)
    if self.cmf_dm_guess != None:
        e_curr, converged, rdm_a, rdm_b = cmf(clustered_ham, ci_vector, h, g,
                                              diis=True, dm_guess=self.cmf_dm_guess,
                                              max_iter=200, thresh=self.cmf_ci_thresh)

    # store rdm
    self.cmf_dm_guess = (rdm_a, rdm_b)

    print(" CMF In Init: %12.8f" % e_curr)

    # build cluster basis and operator matrices using CMF optimized density matrices
    for ci_idx, ci in enumerate(clustered_ham.clusters):
        #if delta_elec != None:
        #    fspaces_i = init_fspace[ci_idx]
        #    fspaces_i = ci.possible_fockspaces( delta_elec=(fspaces_i[0], fspaces_i[1], delta_elec) )
        #else:
        fspaces_i = ci.possible_fockspaces()

        print()
        print(" Form basis by diagonalizing local Hamiltonian for cluster: ", ci_idx)
        ci.form_fockspace_eigbasis(h, g, fspaces_i, max_roots=self.max_roots,
                                   rdm1_a=rdm_a, rdm1_b=rdm_b, ecore=self.ecore)

        print(" Build operator matrices for cluster ", ci.idx)
        ci.build_op_matrices()
        ci.build_local_terms(h, g)
def grad(self, Kpq):
    Kpq = Kpq.reshape(self.h.shape[0], self.h.shape[1])

    # remove frozen rotations
    print(self.to_freeze)
    for freeze in self.to_freeze:
        for bi in freeze:
            for bj in freeze:
                print(" Freeze orbital mixing between clusters %4i and %4i" % (bi, bj))
                for bii in self.blocks[bi]:
                    for bjj in self.blocks[bj]:
                        Kpq[bii, bjj] = 0

    h = self.h
    g = self.g
    C = self.C
    blocks = self.blocks
    init_fspace = self.init_fspace
    clustered_ham = self.clustered_ham
    ci_vector = self.ci_vector

    from scipy.sparse.linalg import expm
    U = expm(Kpq)
    print(U)
    print(U.T @ U)
    print(h)
    C = C @ U
    #molden.from_mo(mol, 'h4.molden', C)
    h = U.T @ h @ U
    print(h)
    g = np.einsum("pqrs,pl->lqrs", g, U)
    g = np.einsum("lqrs,qm->lmrs", g, U)
    g = np.einsum("lmrs,rn->lmns", g, U)
    g = np.einsum("lmns,so->lmno", g, U)
    cmf_dm_guess = (U.T @ self.cmf_dm_guess[0] @ U, U.T @ self.cmf_dm_guess[1] @ U)

    n_blocks = len(blocks)
    clusters = [Cluster(ci, c) for ci, c in enumerate(blocks)]

    print(" Clusters:")
    [print(ci) for ci in clusters]

    clustered_ham = ClusteredOperator(clusters, core_energy=self.ecore)
    print(" Add 1-body terms")
    clustered_ham.add_local_terms()
    clustered_ham.add_1b_terms(h)
    print(" Add 2-body terms")
    clustered_ham.add_2b_terms(g)

    ci_vector = ClusteredState()
    ci_vector.init(clusters, init_fspace)

    if self.cmf_dm_guess == None:
        e_curr, converged, rdm_a, rdm_b = cmf(clustered_ham, ci_vector, h, g,
                                              max_iter=20, thresh=self.cmf_ci_thresh)
    if self.cmf_dm_guess != None:
        e_curr, converged, rdm_a, rdm_b = cmf(clustered_ham, ci_vector, h, g,
                                              diis=True, dm_guess=cmf_dm_guess,
                                              max_iter=20, thresh=self.cmf_ci_thresh)

    # store rdm (but first rotate them back to the reference basis)
    #self.cmf_dm_guess = (U @ rdm_a @ U.T, U @ rdm_b @ U.T)

    print(" CMF In grad : %12.8f" % e_curr)

    # build cluster basis and operator matrices using CMF optimized density matrices
    for ci_idx, ci in enumerate(clusters):
        #if delta_elec != None:
        #    fspaces_i = init_fspace[ci_idx]
        #    fspaces_i = ci.possible_fockspaces( delta_elec=(fspaces_i[0], fspaces_i[1], delta_elec) )
        #else:
        fspaces_i = ci.possible_fockspaces()

        print()
        print(" Form basis by diagonalizing local Hamiltonian for cluster: ", ci_idx)
        ci.form_fockspace_eigbasis(h, g, fspaces_i, max_roots=1,
                                   rdm1_a=rdm_a, rdm1_b=rdm_b, ecore=self.ecore)

        print(" Build operator matrices for cluster ", ci.idx)
        ci.build_op_matrices()
        ci.build_local_terms(h, g)

    tm1, tm2 = build_1rdm(ci_vector, clusters)
    opdm_a, opdm_b, tpdm_aa, tpdm_ab, tpdm_ba, tpdm_bb = build_12rdms_cmf(ci_vector, clusters)

    ## Compare energy using density to reference energy
    # compute energy
    opdm = opdm_a + opdm_b
    tpdm = tpdm_aa + tpdm_ab + tpdm_ba + tpdm_bb
    E = np.einsum('pq,pq', h, opdm)
    E += 0.5 * np.einsum('tvuw,tuwv', g, tpdm)
    print("Energy with D %16.10f" % E)
    print("Reference W   %16.10f" % e_curr)  # reference energy
    print(opdm_a)
    print(rdm_a)
    print(tm1)
    print(opdm_a - tm1)
    assert(abs(e_curr - E) < 1e-8)

    # Generalized Fock
    Gf = np.einsum('pr,rq->pq', h, opdm) + np.einsum('pvuw,quwv->pq', g, tpdm)

    # Gradient
    Gpq = Gf - Gf.T

    if 0:
        if self.matvec == 1:
            h1_vector = matvec.matvec1(clustered_ham, ci_vector, thresh_search=0, nbody_limit=3)
        elif self.matvec == 2:
            pt_vector = matvec.matvec1_parallel2(clustered_ham, ci_vector, nproc=self.nproc, nbody_limit=3)
        elif self.matvec == 3:
            pt_vector = matvec.matvec1_parallel3(clustered_ham, ci_vector, nproc=self.nproc, nbody_limit=3)
        #elif matvec==4:
        #    pt_vector = matvec.matvec1_parallel4(clustered_ham, ci_vector, nproc=self.nproc, nbody_limit=3,
        #                                         shared_mem=shared_mem, batch_size=batch_size)
        else:
            print(" Wrong option for matvec")
            exit()
        #h1_vector.print_configs()
        rdm_a1, rdm_b1 = build_tdm(ci_vector, h1_vector, clustered_ham)
        rdm_a2, rdm_b2 = build_tdm(h1_vector, ci_vector, clustered_ham)
        print("Gradient")
        Gpq = rdm_a1 + rdm_b1 - rdm_a2 - rdm_b2
        #print("CurrCMF:%12.8f Grad:%12.8f dE:%10.6f"%(e_curr,np.linalg.norm(grad),e_curr-self.e))
        #print("CurrCMF:%12.8f Grad:%12.8f "%(e_curr,np.linalg.norm(Gpq)))

    print(Gpq)
    print("NormGrad1", np.linalg.norm(Gpq))

    # remove frozen rotations
    for freeze in self.to_freeze:
        for bi in freeze:
            for bj in freeze:
                for bii in self.blocks[bi]:
                    for bjj in self.blocks[bj]:
                        Gpq[bii, bjj] = 0

    self.gradient = Gpq.ravel()
    return Gpq.ravel()
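
# ---------------------------------------------------------------------------
# Added illustration (standalone, random data): the orbital-rotation step used
# in rotate()/grad() above.  An antisymmetric K gives an orthogonal U = exp(K),
# and the four einsum contractions transform the two-electron integrals one
# index at a time.
# ---------------------------------------------------------------------------
def _example_orbital_rotation():
    import numpy as np
    from scipy.linalg import expm     # dense expm; the code above imports it from scipy.sparse.linalg
    n = 4
    A = np.random.rand(n, n)
    K = A - A.T                       # antisymmetric generator
    U = expm(K)
    assert np.allclose(U.T @ U, np.eye(n))     # U is orthogonal
    h = np.random.rand(n, n)
    h = h + h.T
    g = np.random.rand(n, n, n, n)
    h_rot = U.T @ h @ U
    g_rot = np.einsum("pqrs,pl->lqrs", g, U)
    g_rot = np.einsum("lqrs,qm->lmrs", g_rot, U)
    g_rot = np.einsum("lmrs,rn->lmns", g_rot, U)
    g_rot = np.einsum("lmns,so->lmno", g_rot, U)
    # same as transforming all four indices at once
    g_ref = np.einsum("pqrs,pl,qm,rn,so->lmno", g, U, U, U, U)
    assert np.allclose(g_rot, g_ref)
    return h_rot, g_rot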
cisolver = fci.direct_spin1.FCI(mol)
#e, ci = cisolver.kernel(h1, eri, h1.shape[1], 2, ecore=mol.energy_nuc())
e, ci = cisolver.kernel(h, g, h.shape[1], mol.nelectron, ecore=0)
print(" FCI: %12.8f" % e)

blocks = [[0], [1], [2], [3], [4], [5], [6], [7], [8], [9]]
blocks = [[0], [1], [2], [3], [4], [5]]
blocks = [[0, 1, 2], [3, 4, 5]]
blocks = [range(nel), range(nel, 2 * nel)]
n_blocks = len(blocks)

clusters = []
for ci, c in enumerate(blocks):
    clusters.append(Cluster(ci, c))

ci_vector = ClusteredState(clusters)
#ci_vector.init(((2,2),(0,0)))
#ci_vector.init(((2,2),(2,2),(0,0)))
#ci_vector.init(((2,2),(2,2),(0,0),(0,0)))
#ci_vector.init(((5,5),(0,0)))
#ci_vector.init(((1,1),(1,1),(1,1),(1,1),(1,1),(0,0),(0,0),(0,0),(0,0),(0,0)))
#ci_vector.init(((1,1),(1,1),(1,1),(0,0),(0,0),(0,0)))
#ci_vector.init(((2,1),(1,2)))
ci_vector.init(((nel, nel), (0, 0)))
ci_vector.init(((nel - 1, nel - 1), (1, 1)))
ci_vector.init(((nel, nel - 2), (0, 2)))
ci_vector.init(((nel - 2, nel), (2, 0)))

print(" Clusters:")
[print(ci) for ci in clusters]
def ex_tp_cipsi(ci_vector, clustered_ham,
        thresh_cipsi=1e-4, thresh_conv=1e-8, max_iter=30, n_roots=3,
        thresh_asci=0, nbody_limit=4, pt_type='en', thresh_search=0,
        shared_mem=1e9, batch_size=1, matvec=4, nproc=None):
    """
    +====================================================================+
                           Excited State TPSCI
    +====================================================================+
    ci_vector       : the configuration space for all the needed roots.
                      This is tricky since a pre-computation of a CIS-type
                      calculation needs to be done for this.
                      See test/excited_test.py.

    thresh_cipsi    : include qspace configurations into pspace that have probabilities larger than this value
    thresh_conv     : stop selected CI when delta E is smaller than this value
    thresh_asci     : only consider couplings to pspace configs with probabilities larger than this value
    thresh_search   : delete couplings to pspace configs, default: thresh_cipsi^1/2 / 1000
    nbody_limit     : only compute up to n-body interactions when searching for new configs
    shared_mem      : how much memory to allocate for shared object store for holding clustered_ham - only works with matvec4
    matvec          : which version of matvec to use? [1:4]
    """
# {{{
    print()
    print(" Excited TPSCI options: ")
    print(" |thresh_cipsi : ", thresh_cipsi)
    print(" |thresh_conv : ", thresh_conv)
    print(" |thresh_search : ", thresh_search)
    print(" |max_iter : ", max_iter)
    print(" |n_roots : ", n_roots)
    print(" |thresh_asci : ", thresh_asci)
    print(" |nbody_limit : ", nbody_limit)
    print(" |pt_type : ", pt_type)
    print(" |nproc : ", nproc)

    ecore = clustered_ham.core_energy
    print(" Core energy: %16.12f" % ecore)

    pt_vector = ci_vector.copy()
    #Hd_vector = ClusteredState(ci_vector.clusters)
    Hd_vector = ClusteredState()
    e_prev = np.zeros(n_roots)
    for it in range(max_iter+1):
        print()
        print(" ===================================================================")
        print(" Selected CI Iteration: %4i epsilon: %12.8f" % (it, thresh_cipsi))
        print(" ===================================================================")
        print(" Build full Hamiltonian", flush=True)
        start = time.time()
        if it > 0:
            H = grow_hamiltonian_parallel(H, clustered_ham, ci_vector, ci_vector_old)
        else:
            H = build_full_hamiltonian_parallel2(clustered_ham, ci_vector, nproc=nproc)
        ci_vector_old = ci_vector.copy()
        stop = time.time()
        print(" Time spent building Hamiltonian matrix: %12.2f" % (stop-start))

        print(" Diagonalize Hamiltonian Matrix:", flush=True)
        vguess = ci_vector.get_vector()
        if H.shape[0] > 100 and abs(np.sum(vguess)) > 0:
            e, v = scipy.sparse.linalg.eigsh(H, n_roots, v0=vguess, which='SA')
        else:
            e, v = np.linalg.eigh(H)
        idx = e.argsort()
        e = e[idx]
        v = v[:, idx]
        v0 = v[:, :n_roots]
        e0 = e[:n_roots]

        old_dim = len(ci_vector)

        # store all vectors for the excited states
        all_vecs = []
        for rn in range(n_roots):
            print("Root:%4d Energy:%12.8f CI Dim: %4i " % (rn, e[rn].real, len(ci_vector)))
            vec = ci_vector.copy()
            vec.zero()
            vec.set_vector(v[:, rn])
            all_vecs.append(vec)
            vec.print()

        # check convergence
        e_diff = e0 - e_prev
        delta_e = np.linalg.norm(e_diff)
        e_prev = e0
        if abs(delta_e) < thresh_conv:
            print(" Converged: TPSCI")
            break
        print(" Next iteration CI space dimension", len(ci_vector))

        all_pt_vecs = []
        e2_energies = []
        for rn, vec in enumerate(all_vecs):
            start = time.time()
            if nbody_limit != 4:
                print(" Warning: nbody_limit set to %4i, resulting PT energies are meaningless" % nbody_limit)

            asci_vector = vec.copy()
            print(" Choose subspace from which to search for new configs. Thresh: ", thresh_asci)
            print(" CI Dim : %8i" % len(asci_vector))
            kept_indices = asci_vector.clip(thresh_asci)
            print(" Search Dim : %8i Norm: %12.8f" % (len(asci_vector), asci_vector.norm()))

            if matvec == 1:
                pt_vector = matvec1_parallel1(clustered_ham, asci_vector, nproc=nproc,
                                              thresh_search=thresh_search, nbody_limit=nbody_limit)
            elif matvec == 2:
                pt_vector = matvec1_parallel2(clustered_ham, asci_vector, nproc=nproc,
                                              thresh_search=thresh_search, nbody_limit=nbody_limit)
            elif matvec == 3:
                pt_vector = matvec1_parallel3(clustered_ham, asci_vector, nproc=nproc,
                                              thresh_search=thresh_search, nbody_limit=nbody_limit)
            elif matvec == 4:
                pt_vector = matvec1_parallel4(clustered_ham, asci_vector, nproc=nproc,
                                              thresh_search=thresh_search, nbody_limit=nbody_limit,
                                              shared_mem=shared_mem, batch_size=batch_size)
            stop = time.time()
            print(" Time spent in matvec: %12.2f" % (stop-start))

            pt_vector.prune_empty_fock_spaces()

            print(" Remove CI space from pt_vector vector")
            for fockspace, configs in pt_vector.items():
                if fockspace in vec.fblocks():
                    for config, coeff in list(configs.items()):
                        if config in vec[fockspace]:
                            del pt_vector[fockspace][config]

            for fockspace, configs in vec.items():
                if fockspace in pt_vector:
                    for config, coeff in configs.items():
                        assert(config not in pt_vector[fockspace])

            print(" Norm of CI vector = %12.8f" % vec.norm())
            print(" Dimension of CI space: ", len(vec))
            print(" Dimension of PT space: ", len(pt_vector))
            if len(pt_vector) == 0:
                print("No more connecting config found")
                break

            print(" Compute Denominator", flush=True)
            #exit()
            pt_vector.prune_empty_fock_spaces()

            #import cProfile
            #pr = cProfile.Profile()
            #pr.enable()

            # Build Denominator
            if pt_type == 'en':
                start = time.time()
                if nproc == 1:
                    Hd = update_hamiltonian_diagonal(clustered_ham, pt_vector, Hd_vector)
                else:
                    Hd = build_hamiltonian_diagonal_parallel1(clustered_ham, pt_vector, nproc=nproc)
                #pr.disable()
                #pr.print_stats(sort='time')
                end = time.time()
                print(" Time spent in denominator: %12.2f" % (end - start), flush=True)
                denom = 1/(e0[rn] - Hd)
            elif pt_type == 'mp':
                start = time.time()
                # get barycentric MP zeroth-order energy
                e0_mp = 0
                for f, c, v in vec:
                    for ci in clustered_ham.clusters:
                        e0_mp += ci.ops['H_mf'][(f[ci.idx], f[ci.idx])][c[ci.idx], c[ci.idx]] * v * v
                print(" Zeroth-order MP energy: %12.8f" % e0_mp, flush=True)

                # This is not really MP once we have rotated away from the CMF basis.
                # H = F + (H - F), where F = sum_I F(I)
                #
                # After the Tucker basis change, we just use the diagonal of this Fock operator.
                # Not ideal perhaps, but better than nothing at this stage.
                denom = np.zeros(len(pt_vector))
                idx = 0
                for f, c, v in pt_vector:
                    e0_X = 0
                    for ci in clustered_ham.clusters:
                        e0_X += ci.ops['H_mf'][(f[ci.idx], f[ci.idx])][c[ci.idx], c[ci.idx]]
                    denom[idx] = 1/(e0_mp - e0_X)
                    idx += 1
                end = time.time()
                print(" Time spent in denominator: %12.2f" % (end - start), flush=True)

            pt_vector_v = pt_vector.get_vector()
            pt_vector_v.shape = (pt_vector_v.shape[0])

            e2 = np.multiply(denom, pt_vector_v)
            pt_vector.set_vector(e2)
            e2 = np.dot(pt_vector_v, e2)

            print(" PT2 Energy Correction = %12.8f" % e2)
            print(" PT2 Energy Total = %12.8f" % (e0[rn]+e2))

            all_pt_vecs.append(pt_vector)
            e2_energies.append(e2)

        if it >= max_iter:
            print(" Maxcycles: TPSCI")
            break

        for pt_vector in all_pt_vecs:
            print(" Choose which states to add to CI space", flush=True)
            for fockspace, configs in pt_vector.items():
                for config, coeff in configs.items():
                    if coeff*coeff > thresh_cipsi:
                        if fockspace in ci_vector:
                            ci_vector[fockspace][config] = 0
                        else:
                            ci_vector.add_fockspace(fockspace)
                            ci_vector[fockspace][config] = 0
        print(" Dimension of next CI space: ", len(ci_vector))

    return all_vecs, all_pt_vecs, e0, e0 + np.array(e2_energies)
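
# ---------------------------------------------------------------------------
# Added illustration (made-up numbers, not the package's data structures): the
# "barycentric" MP zeroth-order energy used above for pt_type='mp'.  Each
# reference configuration contributes the sum of its clusters' mean-field
# diagonal elements, weighted by |c|^2; external configurations then get
# denominators 1/(e0_mp - e0_X).
# ---------------------------------------------------------------------------
def _example_barycentric_mp0():
    import numpy as np
    # three reference configurations, two clusters;
    # rows = configs, cols = per-cluster H_mf diagonal elements
    hmf_diag = np.array([[-0.9, -0.7],
                         [-0.8, -0.7],
                         [-0.9, -0.5]])
    c = np.array([0.95, 0.25, 0.19])              # CI coefficients (roughly normalized)
    e0_mp = np.dot(c * c, hmf_diag.sum(axis=1))
    # made-up zeroth-order energies of three external (perturber) configurations
    e0_X_external = np.array([-0.9, -0.6, -0.3])
    denom = 1 / (e0_mp - e0_X_external)
    return e0_mp, denom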
def tp_hbci(ci_vector, clustered_ham,
        thresh_cipsi=1e-4, thresh_ci_clip=1e-5, thresh_conv=1e-8, max_iter=30,
        n_roots=1, thresh_asci=0, nbody_limit=4, thresh_search=0,
        shared_mem=1e9, batch_size=1, matvec=4, nproc=None):
# {{{
    print()
    print(" HB-TPSCI options: ")
    print(" |thresh_cipsi : ", thresh_cipsi)
    print(" |thresh_ci_clip : ", thresh_ci_clip)
    print(" |thresh_conv : ", thresh_conv)
    print(" |max_iter : ", max_iter)
    print(" |n_roots : ", n_roots)
    print(" |thresh_asci : ", thresh_asci)
    print(" |nbody_limit : ", nbody_limit)
    print(" |nproc : ", nproc)

    pt_vector = ci_vector.copy()
    Hd_vector = ClusteredState()
    e_prev = 0
    for it in range(max_iter):
        print()
        print(" ===================================================================")
        print(" Selected CI Iteration: %4i epsilon: %12.8f" % (it, thresh_cipsi))
        print(" ===================================================================")
        print(" Build full Hamiltonian", flush=True)
        start = time.time()
        if nproc == 1:
            H = build_full_hamiltonian(clustered_ham, ci_vector)
        else:
            H = build_full_hamiltonian_parallel2(clustered_ham, ci_vector, nproc=nproc)
        stop = time.time()
        print(" Time spent building Hamiltonian matrix: %12.2f" % (stop-start))

        print(" Diagonalize Hamiltonian Matrix:", flush=True)
        vguess = ci_vector.get_vector()
        if H.shape[0] > 100 and abs(np.sum(vguess)) > 0:
            e, v = scipy.sparse.linalg.eigsh(H, n_roots, v0=vguess, which='SA')
        else:
            e, v = np.linalg.eigh(H)
        idx = e.argsort()
        e = e[idx]
        v = v[:, idx]
        v0 = v[:, 0]
        e0 = e[0]
        print(" Ground state of CI: %12.8f CI Dim: %4i " % (e[0].real, len(ci_vector)))
        ci_vector.zero()
        ci_vector.set_vector(v0)

        old_dim = len(ci_vector)

        if thresh_ci_clip > 0:
            print(" Clip CI Vector: thresh = ", thresh_ci_clip)
            print(" Old CI Dim: ", len(ci_vector))
            kept_indices = ci_vector.clip(np.sqrt(thresh_ci_clip))
            ci_vector.normalize()
            print(" New CI Dim: ", len(ci_vector))
            if len(ci_vector) < old_dim:
                H = H[:, kept_indices][kept_indices, :]
                print(" Diagonalize Clipped Hamiltonian Matrix:", flush=True)
                vguess = ci_vector.get_vector()
                e, v = scipy.sparse.linalg.eigsh(H, n_roots, v0=vguess, which='SA')
                #e,v = np.linalg.eigh(H)
                idx = e.argsort()
                e = e[idx]
                v = v[:, idx]
                v0 = v[:, 0]
                e0 = e[0]
                print(" Ground state of CI: %12.8f CI Dim: %4i " % (e[0].real, len(ci_vector)))
                ci_vector.zero()
                ci_vector.set_vector(v0)

        ci_vector.print()

        asci_vector = ci_vector.copy()
        print(" Choose subspace from which to search for new configs. Thresh: ", thresh_asci)
Thresh: ", thresh_asci) print(" CI Dim : %8i" % len(asci_vector)) kept_indices = asci_vector.clip(thresh_asci) print(" Search Dim : %8i Norm: %12.8f" %( len(asci_vector), asci_vector.norm())) #asci_vector.normalize() print(" Perform Heat-Bath selection to find new configurations:",flush=True) start=time.time() if matvec==1: pt_vector = matvec1_parallel1(clustered_ham, asci_vector, nproc=nproc, thresh_search=thresh_search, nbody_limit=nbody_limit) elif matvec==2: pt_vector = matvec1_parallel2(clustered_ham, asci_vector, nproc=nproc, thresh_search=thresh_search, nbody_limit=nbody_limit) elif matvec==3: pt_vector = matvec1_parallel3(clustered_ham, asci_vector, nproc=nproc, thresh_search=thresh_search, nbody_limit=nbody_limit) elif matvec==4: pt_vector = matvec1_parallel4(clustered_ham, asci_vector, nproc=nproc, thresh_search=thresh_search, nbody_limit=nbody_limit, shared_mem=shared_mem, batch_size=batch_size) #pt_vector = heat_bath_search(clustered_ham, ci_vector, thresh_cipsi=thresh_cipsi, nproc=nproc) stop=time.time() print(" Number of new configurations found : ", len(pt_vector)) print(" Time spent in heat bath search: %12.2f" %(stop-start),flush=True) print(" Remove CI space from results") for fockspace,configs in pt_vector.items(): if fockspace in ci_vector.fblocks(): for config,coeff in list(configs.items()): if config in ci_vector[fockspace]: del pt_vector[fockspace][config] pt_vector.prune_empty_fock_spaces() print(" Number of new configurations found (pruned): ", len(pt_vector)) for fockspace,configs in ci_vector.items(): if fockspace in pt_vector: for config,coeff in configs.items(): assert(config not in pt_vector[fockspace]) pt_vector.print() print(" Add new states to CI space", flush=True) print(" Dimension of CI space : ", len(ci_vector)) start = time.time() pt_vector.zero() ci_vector.add(pt_vector) end = time.time() print(" Dimension of next CI space: ", len(ci_vector)) print(" Time spent in finding new CI space: %12.2f" %(end - start), flush=True) start = time.time() delta_e = e0 - e_prev e_prev = e0 if len(ci_vector) <= old_dim and abs(delta_e) < thresh_conv: print(" Converged") break print(" Next iteration CI space dimension", len(ci_vector)) return ci_vector, e0
def tp_cipsi(ci_vector, clustered_ham,
        thresh_cipsi=1e-4, thresh_ci_clip=1e-5, thresh_conv=1e-8, max_iter=30,
        n_roots=1, thresh_asci=0, nbody_limit=4, pt_type='en', thresh_search=0,
        shared_mem=1e9, batch_size=1, matvec=4, nproc=None):
    """
    thresh_cipsi    : include qspace configurations into pspace that have probabilities larger than this value
    thresh_ci_clip  : drop pspace configs whose variational solution yields probabilities smaller than this value
    thresh_conv     : stop selected CI when delta E is smaller than this value
    thresh_asci     : only consider couplings to pspace configs with probabilities larger than this value
    thresh_search   : delete couplings to pspace configs, default: thresh_cipsi^1/2 / 1000
    nbody_limit     : only compute up to n-body interactions when searching for new configs
    shared_mem      : how much memory to allocate for shared object store for holding clustered_ham - only works with matvec4
    matvec          : which version of matvec to use? [1:4]
    """
# {{{
    print()
    print(" TPSCI options: ")
    print(" |thresh_cipsi : ", thresh_cipsi)
    print(" |thresh_ci_clip : ", thresh_ci_clip)
    print(" |thresh_conv : ", thresh_conv)
    print(" |thresh_search : ", thresh_search)
    print(" |max_iter : ", max_iter)
    print(" |n_roots : ", n_roots)
    print(" |thresh_asci : ", thresh_asci)
    print(" |nbody_limit : ", nbody_limit)
    print(" |pt_type : ", pt_type)
    print(" |nproc : ", nproc)

    pt_vector = ci_vector.copy()
    #Hd_vector = ClusteredState(ci_vector.clusters)
    Hd_vector = ClusteredState()
    e_prev = 0
    for it in range(max_iter+1):
        print()
        print(" ===================================================================")
        print(" Selected CI Iteration: %4i epsilon: %12.8f" % (it, thresh_cipsi))
        print(" ===================================================================")
        print(" Build full Hamiltonian", flush=True)
        start = time.time()
        if it > 0:
            H = grow_hamiltonian_parallel(H, clustered_ham, ci_vector, ci_vector_old)
        else:
            H = build_full_hamiltonian_parallel2(clustered_ham, ci_vector, nproc=nproc)
        ci_vector_old = ci_vector.copy()
        stop = time.time()
        print(" Time spent building Hamiltonian matrix: %12.2f" % (stop-start))

        print(" Diagonalize Hamiltonian Matrix:", flush=True)
        vguess = ci_vector.get_vector()
        if H.shape[0] > 100 and abs(np.sum(vguess)) > 0:
            e, v = scipy.sparse.linalg.eigsh(H, n_roots, v0=vguess, which='SA')
        else:
            e, v = np.linalg.eigh(H)
        idx = e.argsort()
        e = e[idx]
        v = v[:, idx]
        v0 = v[:, 0]
        e0 = e[0]
        print(" Ground state of CI: %12.8f CI Dim: %4i " % (e[0].real, len(ci_vector)))
        ci_vector.zero()
        ci_vector.set_vector(v0)

        old_dim = len(ci_vector)

        if thresh_ci_clip > 0:
            print(" Clip CI Vector: thresh = ", thresh_ci_clip)
            print(" Old CI Dim: ", len(ci_vector))
            kept_indices = ci_vector.clip(np.sqrt(thresh_ci_clip))
            ci_vector.normalize()
            print(" New CI Dim: ", len(ci_vector))
            if len(ci_vector) < old_dim:
                H = H[:, kept_indices][kept_indices, :]
                print(" Diagonalize Clipped Hamiltonian Matrix:", flush=True)
                vguess = ci_vector.get_vector()
                e, v = scipy.sparse.linalg.eigsh(H, n_roots, v0=vguess, which='SA')
                #e,v = np.linalg.eigh(H)
                idx = e.argsort()
                e = e[idx]
                v = v[:, idx]
                v0 = v[:, 0]
                e0 = e[0]
                print(" Ground state of CI: %12.8f CI Dim: %4i " % (e[0].real, len(ci_vector)))
                ci_vector.zero()
                ci_vector.set_vector(v0)
                ci_vector_old = ci_vector.copy()

        ecore = clustered_ham.core_energy
        print(" Core energy: %16.12f" % ecore)
        #print(" TPSCI Iter %3i Elec Energy: %12.8f Total Energy: %12.8f CI Dim: %4i "%(it, e[0].real,e[0].real+ecore,len(ci_vector)))
        #print(" Ground state of CI: %12.8f CI Dim: %4i "%(e[0].real,len(ci_vector)))
        print(" TPSCI Iter %3i: %12.8f CI Dim: %4i " % (it, e[0].real, len(ci_vector)))
        ci_vector.print()

        delta_e = e0 - e_prev
        e_prev = e0
        if len(ci_vector) <= old_dim and abs(delta_e) < thresh_conv:
            print(" Converged: TPSCI")
            break
        print(" Next iteration CI space dimension", len(ci_vector))

        asci_vector = ci_vector.copy()
        print(" Choose subspace from which to search for new configs. Thresh: ", thresh_asci)
        print(" CI Dim : %8i" % len(asci_vector))
        kept_indices = asci_vector.clip(thresh_asci)
        print(" Search Dim : %8i Norm: %12.8f" % (len(asci_vector), asci_vector.norm()))
        #asci_vector.normalize()

        print(" Compute Matrix Vector Product:", flush=True)
        profile = 0
        if profile:
            import cProfile
            pr = cProfile.Profile()
            pr.enable()

        start = time.time()
        if nbody_limit != 4:
            print(" Warning: nbody_limit set to %4i, resulting PT energies are meaningless" % nbody_limit)

        if matvec == 1:
            pt_vector = matvec1_parallel1(clustered_ham, asci_vector, nproc=nproc,
                                          thresh_search=thresh_search, nbody_limit=nbody_limit)
        elif matvec == 2:
            pt_vector = matvec1_parallel2(clustered_ham, asci_vector, nproc=nproc,
                                          thresh_search=thresh_search, nbody_limit=nbody_limit)
        elif matvec == 3:
            pt_vector = matvec1_parallel3(clustered_ham, asci_vector, nproc=nproc,
                                          thresh_search=thresh_search, nbody_limit=nbody_limit)
        elif matvec == 4:
            pt_vector = matvec1_parallel4(clustered_ham, asci_vector, nproc=nproc,
                                          thresh_search=thresh_search, nbody_limit=nbody_limit,
                                          shared_mem=shared_mem, batch_size=batch_size)
        stop = time.time()
        print(" Time spent in matvec: %12.2f" % (stop-start))
        #exit()

        # Compute the zeroth-order energy from the matvec result.
        # This gives us some indication how accurate the approximations
        # (asci_vector and thresh_search) are.
        e0_curr = ci_vector.dot(pt_vector)/asci_vector.dot(asci_vector)
        print(" Zeroth-order energy: %12.8f Error in E0: %12.8f" % (e0_curr, e0_curr - e0))

        if profile:
            pr.disable()
            pr.print_stats(sort='time')

        pt_vector.prune_empty_fock_spaces()

        var = pt_vector.dot(pt_vector) - e0*e0
        print(" Variance: %12.8f" % var, flush=True)
        tmp = ci_vector.dot(pt_vector)
        var = pt_vector.dot(pt_vector) - tmp*tmp
        print(" Variance Subspace: %12.8f" % var, flush=True)

        print(" Remove CI space from pt_vector vector")
        for fockspace, configs in pt_vector.items():
            if fockspace in ci_vector.fblocks():
                for config, coeff in list(configs.items()):
                    if config in ci_vector[fockspace]:
                        del pt_vector[fockspace][config]

        for fockspace, configs in ci_vector.items():
            if fockspace in pt_vector:
                for config, coeff in configs.items():
                    assert(config not in pt_vector[fockspace])

        print(" Norm of CI vector = %12.8f" % ci_vector.norm())
        print(" Dimension of CI space: ", len(ci_vector))
        print(" Dimension of PT space: ", len(pt_vector))
        if len(pt_vector) == 0:
            print("No more connecting config found")
            break

        print(" Compute Denominator", flush=True)
        #exit()
        pt_vector.prune_empty_fock_spaces()

        #import cProfile
        #pr = cProfile.Profile()
        #pr.enable()

        # Build Denominator
        if pt_type == 'en':
            start = time.time()
            if nproc == 1:
                Hd = update_hamiltonian_diagonal(clustered_ham, pt_vector, Hd_vector)
            else:
                Hd = build_hamiltonian_diagonal_parallel1(clustered_ham, pt_vector, nproc=nproc)
            #pr.disable()
            #pr.print_stats(sort='time')
            end = time.time()
            print(" Time spent in denominator: %12.2f" % (end - start), flush=True)
            denom = 1/(e0 - Hd)
        elif pt_type == 'mp':
            start = time.time()
            # get barycentric MP zeroth-order energy
            e0_mp = 0
            for f, c, v in ci_vector:
                for ci in clustered_ham.clusters:
                    e0_mp += ci.ops['H_mf'][(f[ci.idx], f[ci.idx])][c[ci.idx], c[ci.idx]] * v * v
            print(" Zeroth-order MP energy: %12.8f" % e0_mp, flush=True)
            # This is not really MP once we have rotated away from the CMF basis.
            # H = F + (H - F), where F = sum_I F(I)
            #
            # After the Tucker basis change, we just use the diagonal of this Fock operator.
            # Not ideal perhaps, but better than nothing at this stage.
            denom = np.zeros(len(pt_vector))
            idx = 0
            for f, c, v in pt_vector:
                e0_X = 0
                for ci in clustered_ham.clusters:
                    e0_X += ci.ops['H_mf'][(f[ci.idx], f[ci.idx])][c[ci.idx], c[ci.idx]]
                denom[idx] = 1/(e0_mp - e0_X)
                idx += 1
            end = time.time()
            print(" Time spent in denominator: %12.2f" % (end - start), flush=True)

        pt_vector_v = pt_vector.get_vector()
        pt_vector_v.shape = (pt_vector_v.shape[0])

        e2 = np.multiply(denom, pt_vector_v)
        pt_vector.set_vector(e2)
        e2 = np.dot(pt_vector_v, e2)

        print(" PT2 Energy Correction = %12.8f" % e2)
        print(" PT2 Energy Total = %12.8f" % (e0+e2))

        if it >= max_iter:
            print(" Maxcycles: TPSCI")
            break

        print(" Choose which states to add to CI space", flush=True)
        for fockspace, configs in pt_vector.items():
            for config, coeff in configs.items():
                if coeff*coeff > thresh_cipsi:
                    if fockspace in ci_vector:
                        ci_vector[fockspace][config] = 0
                    else:
                        ci_vector.add_fockspace(fockspace)
                        ci_vector[fockspace][config] = 0
        print(" Dimension of next CI space: ", len(ci_vector))

    return ci_vector, pt_vector, e0, e0+e2
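
# ---------------------------------------------------------------------------
# Added illustration (made-up numbers): the arithmetic used above to turn the
# first-order residual sigma_x = <x|H|Psi0> and diagonal H_xx into an
# Epstein-Nesbet PT2 correction, e2 = sum_x sigma_x^2 / (E0 - H_xx).
# ---------------------------------------------------------------------------
def _example_pt2_from_residual():
    import numpy as np
    e0 = -1.50                                   # zeroth-order (variational) energy
    sigma = np.array([0.02, -0.01, 0.005])       # couplings <x|H|Psi0> to external configs
    Hd = np.array([-0.80, -0.60, -0.20])         # diagonal elements <x|H|x>
    denom = 1 / (e0 - Hd)
    e2 = np.dot(sigma, np.multiply(denom, sigma))
    # every term lowers the energy since e0 lies below the external diagonals
    assert e2 < 0
    return e0 + e2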
def system_setup(h, g, ecore, blocks, init_fspace,
        max_roots=1000,
        delta_elec=None,
        cmf_maxiter=10,
        cmf_thresh=1e-8,
        cmf_dm_guess=None,   # initial guess density matrices for cmf: tuple(alpha, beta)
        cmf_diis=False,
        cmf_diis_start=1,
        cmf_max_diis=6):
# {{{
    """
    If an input list of Cluster objects is provided for clusters_in, then the CMF
    will be restricted to the space spanned by that current basis
    """
    print(" System setup option:")
    print(" |init_fspace : ", init_fspace)
    print(" |max_roots : ", max_roots)
    print(" |delta_elec : ", delta_elec)
    print(" |cmf_diis : ", cmf_diis)
    print(" |cmf_maxiter : ", cmf_maxiter)
    print(" |cmf_dm_guess : ", cmf_dm_guess != None)
    print(" |cmf_thresh : ", cmf_thresh)
    print(" |cmf_diis_start : ", cmf_diis_start)
    print(" |cmf_max_diis : ", cmf_max_diis)
    print(" |Ecore : ", ecore)

    n_blocks = len(blocks)
    clusters = [Cluster(ci, c) for ci, c in enumerate(blocks)]

    print(" Clusters:")
    [print(ci) for ci in clusters]

    clustered_ham = ClusteredOperator(clusters, core_energy=ecore)
    print(" Add 1-body terms")
    clustered_ham.add_local_terms()
    clustered_ham.add_1b_terms(h)
    print(" Add 2-body terms")
    clustered_ham.add_2b_terms(g)
    clustered_ham.h = h
    clustered_ham.g = g

    ci_vector = ClusteredState()
    ci_vector.init(clusters, init_fspace)

    if cmf_maxiter > 0:
        e_cmf, cmf_conv, rdm_a, rdm_b = cmf(clustered_ham, ci_vector, h, g,
                                            diis=cmf_diis,
                                            dm_guess=cmf_dm_guess,
                                            diis_start=cmf_diis_start,
                                            max_diis=cmf_max_diis,
                                            thresh=cmf_thresh,
                                            max_iter=cmf_maxiter)
    else:
        rdm_a = np.zeros(h.shape)
        rdm_b = np.zeros(h.shape)
        e_cmf = 0
        cmf_conv = False

    # build cluster basis and operator matrices using CMF optimized density matrices
    for ci_idx, ci in enumerate(clusters):
        if delta_elec != None:
            fspaces_i = init_fspace[ci_idx]
            fspaces_i = ci.possible_fockspaces(delta_elec=(fspaces_i[0], fspaces_i[1], delta_elec))
        else:
            fspaces_i = ci.possible_fockspaces()

        print()
        print(" Form basis by diagonalizing local Hamiltonian for cluster: ", ci_idx)
        ci.form_fockspace_eigbasis(h, g, fspaces_i, max_roots=max_roots,
                                   rdm1_a=rdm_a, rdm1_b=rdm_b, ecore=ecore)

        print(" Build operator matrices for cluster ", ci.idx)
        ci.build_op_matrices()
        ci.build_local_terms(h, g)

    return clusters, clustered_ham, ci_vector, (e_cmf, rdm_a, rdm_b, cmf_conv)
def tpsci_tucker(ci_vector, clustered_ham,
        selection="cipsi",
        thresh_cipsi=1e-4,
        thresh_ci_clip=1e-5,
        thresh_asci=0,
        thresh_cipsi_conv=1e-8,
        thresh_tucker_conv=1e-6,
        thresh_search=0,
        max_cipsi_iter=30,
        max_tucker_iter=20,
        tucker_state_clip=None,
        tucker_truncate=-1,
        hshift=1e-8,
        pt_type='en',
        nbody_limit=4,
        shared_mem=1e9,
        batch_size=1,
        tucker_conv_target=0,
        matvec=4,
        chk_file=None,
        nproc=None):
    """
    Run iterations of TP-CIPSI to make the Tucker decomposition self-consistent

    thresh_tucker_conv  :
    thresh_cipsi        : include qspace configurations into pspace that have probabilities larger than this value
    thresh_ci_clip      : drop pspace configs whose variational solution yields probabilities smaller than this value
    thresh_cipsi_conv   : stop selected CI when delta E is smaller than this value
    thresh_asci         : only consider couplings to pspace configs with probabilities larger than this value
    thresh_search       : delete couplings to pspace configs, default: thresh_cipsi^1/2 / 1000
    pt_type             : which denominator to use? Epstein-Nesbet (en) or Moller-Plesset-like (mp)
    tucker_conv_target  : which energy should we use to determine convergence?
                            0 = variational energy
                            2 = pt2 energy
    shared_mem          : how much memory to allocate for shared object store for holding clustered_ham - only works with matvec4
    matvec              : which version of matvec to use? [1:4]
    tucker_state_clip   : delete PT1 coefficients smaller than this before adding to the vector used to perform the HOSVD
    """
# {{{
    print(" Tucker optimization options:")
    print(" |selection : ", selection)
    print(" |thresh_cipsi : ", thresh_cipsi)
    print(" |thresh_ci_clip : ", thresh_ci_clip)
    print(" |thresh_cipsi_conv : ", thresh_cipsi_conv)
    print(" |max_cipsi_iter : ", max_cipsi_iter)
    print(" |thresh_tucker_conv : ", thresh_tucker_conv)
    print(" |max_tucker_iter : ", max_tucker_iter)
    print(" |tucker_state_clip : ", tucker_state_clip)
    print(" |tucker_truncate : ", tucker_truncate)
    print(" |hshift : ", hshift)
    print(" |thresh_asci : ", thresh_asci)
    print(" |thresh_search : ", thresh_search)
    print(" |pt_type : ", pt_type)
    print(" |nbody_limit : ", nbody_limit)
    print(" |tucker_conv_target : ", tucker_conv_target)
    print(" |nproc : ", nproc)

    t_conv = False
    if tucker_state_clip == None:
        tucker_state_clip = thresh_cipsi/10.0
    e_prev = 0
    e_last = 0
    ci_vector_ref = ci_vector.copy()
    for brdm_iter in range(max_tucker_iter+1):

        if selection == "cipsi":
            start = time.time()
            ci_vector, pt_vector, e0, e2 = tp_cipsi(ci_vector_ref.copy(), clustered_ham,
                                                    pt_type=pt_type,
                                                    thresh_cipsi=thresh_cipsi,
                                                    thresh_ci_clip=thresh_ci_clip,
                                                    thresh_conv=thresh_cipsi_conv,
                                                    max_iter=max_cipsi_iter,
                                                    thresh_asci=thresh_asci,
                                                    nbody_limit=nbody_limit,
                                                    matvec=matvec,
                                                    batch_size=batch_size,
                                                    shared_mem=shared_mem,
                                                    thresh_search=thresh_search,
                                                    nproc=nproc)
            end = time.time()

            ecore = clustered_ham.core_energy
            if tucker_conv_target == 0:
                e_curr = e0
            elif tucker_conv_target == 2:
                e_curr = e2
            else:
                print(" Wrong value for tucker_conv_target")
                exit()

            print(" TPSCI: E0 = %12.8f E2 = %16.8f CI_DIM: %-12i Time spent %-12.1f" % (e0+ecore, e2+ecore, len(ci_vector), end-start))

        elif selection == "heatbath":
            start = time.time()
            ci_vector, e0 = tp_hbci(ci_vector_ref.copy(), clustered_ham,
                                    thresh_cipsi=thresh_cipsi,
                                    thresh_ci_clip=thresh_ci_clip,
                                    thresh_conv=thresh_cipsi_conv,
                                    max_iter=max_cipsi_iter,
                                    thresh_asci=thresh_asci,
                                    nbody_limit=nbody_limit,
                                    matvec=matvec,
                                    batch_size=batch_size,
                                    shared_mem=shared_mem,
                                    thresh_search=thresh_search,
                                    nproc=nproc)
            end = time.time()
            pt_vector = ClusteredState()
            e_curr = e0
            e2 = 0

            ecore = clustered_ham.core_energy
            print(" HB-TPSCI: E0 = %16.8f CI_DIM: %-12i Time spent %-12.2f" % (e0+ecore, len(ci_vector), end-start))

        if abs(e_prev - e_curr) < thresh_tucker_conv:
            print(" Converged: Tucker")
            t_conv = True
            break
        elif brdm_iter >= max_tucker_iter:
            print(" Maxcycles: Tucker")
            t_conv = False
            break
        e_prev = e_curr

        # do the Tucker decomposition
        if selection == "cipsi":
            print(" Reduce size of 1st order wavefunction", flush=True)
            print(" Before:", len(pt_vector))
            pt_vector.clip(tucker_state_clip)
            pt_vector.add(ci_vector)
            pt_vector.normalize()
            print(" After:", len(pt_vector), flush=True)
            hosvd(pt_vector, clustered_ham, hshift=hshift, truncate=tucker_truncate)
        elif selection == "heatbath":
            hosvd(ci_vector, clustered_ham, hshift=hshift, truncate=tucker_truncate)

        # Should we rebuild the operator matrices after rotating the basis?
        if 0:
            h = clustered_ham.h
            g = clustered_ham.g
            for ci in clustered_ham.clusters:
                print(" Build operator matrices for cluster ", ci.idx)
                ci.build_op_matrices()
                ci.build_local_terms(h, g)

        print(" Ensure TDMs are still contiguous:", flush=True)
        start = time.time()
        for ci in clustered_ham.clusters:
            print(" ", ci)
            for o in ci.ops:
                for fock in ci.ops[o]:
                    if ci.ops[o][fock].data.contiguous == False:
                        #print(" Rearrange data for %5s :" %o, fock)
                        ci.ops[o][fock] = np.ascontiguousarray(ci.ops[o][fock])
        stop = time.time()
        print(" Time spent making operators contiguous: %12.2f" % (stop-start))

        if chk_file != None:
            print(" Saving Hamiltonian to disk", flush=True)
            file = open("%s_ham" % chk_file, 'wb')
            pickle.dump(clustered_ham, file)
            print(" Done.", flush=True)

            #print(" Saving wavefunction to disk",flush=True)
            #file = open("%s_vec"%chk_file, 'wb')
            #pickle.dump(ci_vector, file)
            print(" Done.", flush=True)

    return ci_vector, pt_vector, e0, e2, t_conv
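
# ---------------------------------------------------------------------------
# Added illustration: the pickle checkpointing pattern used at the end of
# tpsci_tucker (a file named "<chk_file>_ham").  A plain dict stands in for the
# clustered_ham object here so the sketch runs standalone.
# ---------------------------------------------------------------------------
def _example_checkpoint_roundtrip(chk_file="demo_chk"):
    import os
    import pickle
    payload = {"note": "stand-in for clustered_ham"}
    with open("%s_ham" % chk_file, "wb") as f:
        pickle.dump(payload, f)
    with open("%s_ham" % chk_file, "rb") as f:
        restored = pickle.load(f)
    assert restored == payload
    os.remove("%s_ham" % chk_file)     # clean up the demo file
    return restored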