def writeIntegralFile(DMRGCI, h1eff, eri_cas, ncas, nelec, ecore=0):
    """Write the FCIDUMP integral file consumed by the DMRG solver.

    Args:
        DMRGCI: solver object providing runtimeDir, integralFile,
            scratchDirectory, groupname and orbsym attributes.
        h1eff: effective one-electron Hamiltonian in the active space.
        eri_cas: active-space two-electron integrals (any ao2mo packing).
        ncas: number of active orbitals.
        nelec: number of active electrons, an int or an (na, nb) pair.
        ecore: frozen-core + nuclear-repulsion energy written to the dump.

    Returns:
        The path of the FCIDUMP file that was written.
    """
    if isinstance(nelec, (int, numpy.integer)):
        neleca = nelec // 2 + nelec % 2
        nelecb = nelec - neleca
    else:
        neleca, nelecb = nelec

    integralFile = os.path.join(DMRGCI.runtimeDir, DMRGCI.integralFile)

    # BUGFIX: the original condition ``DMRGCI.orbsym is not []`` compares
    # identity against a freshly created list and is therefore always True;
    # test for emptiness explicitly.
    if DMRGCI.groupname is not None and len(DMRGCI.orbsym) > 0:
        # First zero out the symmetry-forbidden integrals using the pyscf
        # internal irrep IDs (mod 10 strips the Dooh/Coov extra label so the
        # XOR below is a valid irrep product), then convert to Molpro irrep
        # IDs for the FCIDUMP header.  BUGFIX: the original masked with the
        # already-converted (1-based Molpro) IDs, for which XOR is not the
        # irrep product — made consistent with the corrected sibling
        # variants of this function.
        pyscf_orbsym = numpy.asarray(DMRGCI.orbsym) % 10
        pair_irrep = (pyscf_orbsym.reshape(-1, 1)
                      ^ pyscf_orbsym)[numpy.tril_indices(ncas)]
        sym_forbid = pair_irrep.reshape(-1, 1) != pair_irrep.ravel()
        eri_cas = pyscf.ao2mo.restore(4, eri_cas, ncas)
        eri_cas[sym_forbid] = 0
        eri_cas = pyscf.ao2mo.restore(8, eri_cas, ncas)
        orbsym = numpy.asarray(
            dmrg_sym.convert_orbsym(DMRGCI.groupname, pyscf_orbsym))
    else:
        orbsym = []
        eri_cas = pyscf.ao2mo.restore(8, eri_cas, ncas)

    if not os.path.exists(DMRGCI.scratchDirectory):
        os.makedirs(DMRGCI.scratchDirectory)
    if not os.path.exists(DMRGCI.runtimeDir):
        os.makedirs(DMRGCI.runtimeDir)

    pyscf.tools.fcidump.from_integrals(integralFile, h1eff, eri_cas, ncas,
                                       neleca + nelecb, ecore,
                                       ms=abs(neleca - nelecb),
                                       orbsym=orbsym)
    # Return the path for callers that need it (consistent with the other
    # variants of this function in the file; backward compatible).
    return integralFile
def writeIntegralFile(DMRGCI, h1eff, eri_cas, ncas, nelec, ecore=0):
    """Write the FCIDUMP file for the Block DMRG solver.

    ``nelec`` may be an int or an (na, nb) pair; ``ecore`` is written as the
    core energy in the dump header.  Returns the path of the written file.
    """
    if isinstance(nelec, (int, numpy.integer)):
        neleca = nelec // 2 + nelec % 2
        nelecb = nelec - neleca
    else:
        neleca, nelecb = nelec

    # The name of the FCIDUMP file, default is "FCIDUMP".
    integralFile = os.path.join(DMRGCI.runtimeDir, DMRGCI.integralFile)

    # BUGFIX: ``DMRGCI.orbsym is not []`` is an identity comparison against
    # a fresh list and is always True; test for emptiness instead.
    if DMRGCI.groupname is not None and len(DMRGCI.orbsym) > 0:
        # First remove the symmetry-forbidden integrals.  This is done with
        # the pyscf internal irrep IDs (stored in DMRGCI.orbsym); mod 10
        # strips the Dooh/Coov extra label so XOR is the irrep product.
        orbsym = numpy.asarray(DMRGCI.orbsym) % 10
        pair_irrep = (orbsym.reshape(-1, 1) ^ orbsym)[numpy.tril_indices(ncas)]
        sym_forbid = pair_irrep.reshape(-1, 1) != pair_irrep.ravel()
        eri_cas = ao2mo.restore(4, eri_cas, ncas)
        eri_cas[sym_forbid] = 0
        eri_cas = ao2mo.restore(8, eri_cas, ncas)
        # Then convert the pyscf internal irrep-ID to the Molpro irrep-ID
        # expected by the FCIDUMP format.
        orbsym = numpy.asarray(
            dmrg_sym.convert_orbsym(DMRGCI.groupname, orbsym))
    else:
        orbsym = []
        eri_cas = ao2mo.restore(8, eri_cas, ncas)

    if not os.path.exists(DMRGCI.scratchDirectory):
        os.makedirs(DMRGCI.scratchDirectory)
    if not os.path.exists(DMRGCI.runtimeDir):
        os.makedirs(DMRGCI.runtimeDir)

    tools.fcidump.from_integrals(integralFile, h1eff, eri_cas, ncas,
                                 neleca + nelecb, ecore,
                                 ms=abs(neleca - nelecb), orbsym=orbsym)
    return integralFile
def writeIntegralFile(DMRGCI, h1eff, eri_cas, ncas, nelec, ecore=0):
    """Write a (symmetry-labelled) FCIDUMP file for the DMRG solver.

    Returns the path of the written file.
    """
    if isinstance(nelec, (int, numpy.integer)):
        neleca = nelec // 2 + nelec % 2
        nelecb = nelec - neleca
    else:
        neleca, nelecb = nelec

    integralFile = os.path.join(DMRGCI.runtimeDir, DMRGCI.integralFile)

    # BUGFIX: ``DMRGCI.orbsym is not []`` is an identity comparison against
    # a fresh list and is always True; test for emptiness instead.
    if DMRGCI.groupname is not None and len(DMRGCI.orbsym) > 0:
        # Convert pyscf internal irrep IDs to the Molpro IDs used by FCIDUMP.
        orbsym = dmrg_sym.convert_orbsym(DMRGCI.groupname, DMRGCI.orbsym)
    else:
        orbsym = []

    if not os.path.exists(DMRGCI.scratchDirectory):
        os.makedirs(DMRGCI.scratchDirectory)
    if not os.path.exists(DMRGCI.runtimeDir):
        os.makedirs(DMRGCI.runtimeDir)

    # Repack the ERIs with 8-fold permutation symmetry for the dump.
    eri_cas = pyscf.ao2mo.restore(8, eri_cas, ncas)
    pyscf.tools.fcidump.from_integrals(integralFile, h1eff, eri_cas, ncas,
                                       neleca + nelecb, ecore,
                                       ms=abs(neleca - nelecb),
                                       orbsym=orbsym)
    # Return the path for consistency with the other variants (backward
    # compatible: previous callers ignored the implicit None).
    return integralFile
def writeIntegralFile(SHCI, h1eff, eri_cas, norb, nelec, ecore=0):
    """Write the integral file for the SHCI (Dice) solver.

    For linear molecules with ``SHCI.useExtraSymm`` the real D2h-adapted
    integrals are rotated into the complex Dinfh (Lz-adapted) basis and
    dumped via ``writeIntNoSymm``; otherwise a standard FCIDUMP is written
    with ``fcidumpFromIntegral``.
    """
    if isinstance(nelec, (int, numpy.integer)):
        neleca = nelec // 2 + nelec % 2
        nelecb = nelec - neleca
    else:
        neleca, nelecb = nelec

    # The name of the FCIDUMP file, default is "FCIDUMP".
    integralFile = os.path.join(SHCI.runtimeDir, SHCI.integralFile)
    if not os.path.exists(SHCI.scratchDirectory):
        os.makedirs(SHCI.scratchDirectory)

    from pyscf import symm
    from pyscf.dmrgscf import dmrg_sym
    # BUGFIX: the second group label was 'Cooh', which is not a pyscf group
    # name and could never match; 'Coov' is intended (consistent with the
    # other SHCI writeIntegralFile variant in this file).
    if (SHCI.groupname == 'Dooh'
            or SHCI.groupname == 'Coov') and SHCI.useExtraSymm:
        # Rotate h1eff into the complex Dinfh orbital basis; only the real
        # part survives the transformation.
        coeffs, nRows, rowIndex, rowCoeffs, orbsym = D2htoDinfh(
            SHCI, norb, nelec)
        newintt = numpy.tensordot(coeffs.conj(), h1eff, axes=([1], [0]))
        newint1 = numpy.tensordot(newintt, coeffs, axes=([1], [1]))
        newint1r = numpy.ascontiguousarray(newint1.real, numpy.float64)
        # BUGFIX: restore(1) was called twice on eri_cas; once suffices.
        int2 = pyscf.ao2mo.restore(1, eri_cas, norb)
        eri_cas = numpy.zeros_like(int2)
        # transformDinfh fills eri_cas in place from int2.
        transformDinfh(norb, numpy.ascontiguousarray(nRows, numpy.int32),
                       numpy.ascontiguousarray(rowIndex, numpy.int32),
                       numpy.ascontiguousarray(rowCoeffs, numpy.float64),
                       numpy.ascontiguousarray(int2, numpy.float64),
                       numpy.ascontiguousarray(eri_cas, numpy.float64))
        writeIntNoSymm(norb,
                       numpy.ascontiguousarray(newint1r, numpy.float64),
                       numpy.ascontiguousarray(eri_cas, numpy.float64),
                       ecore, neleca + nelecb,
                       numpy.asarray(orbsym, dtype=numpy.int32))
    else:
        # BUGFIX: ``SHCI.orbsym is not []`` was an identity test against a
        # fresh list and always True; test for emptiness instead.
        if SHCI.groupname is not None and len(SHCI.orbsym) > 0:
            orbsym = dmrg_sym.convert_orbsym(SHCI.groupname, SHCI.orbsym)
        else:
            orbsym = [1] * norb
        # Single restore(8) (the original called it redundantly twice in
        # the no-symmetry path).
        eri_cas = pyscf.ao2mo.restore(8, eri_cas, norb)
        # Writes the FCIDUMP file using functions in SHCI_tools.cpp.
        fcidumpFromIntegral(integralFile, h1eff, eri_cas, norb,
                            neleca + nelecb, ecore,
                            numpy.asarray(orbsym, dtype=numpy.int32),
                            abs(neleca - nelecb))
    # print() form works on both Python 2 and 3 (original used the
    # Python-2-only print statement).
    print("ECORE: %s" % ecore)
def writeIntegralFile(DMRGCI, h1eff, eri_cas, ncas, nelec, ecore=0):
    """Write the FCIDUMP file for the Block DMRG solver (MPI-aware variant).

    The scratch directory is created on every node through
    ``DMRGCI.mpiprefix`` rather than only on node0.  Returns the path of
    the written file.
    """
    if isinstance(nelec, (int, numpy.integer)):
        neleca = nelec // 2 + nelec % 2
        nelecb = nelec - neleca
    else:
        neleca, nelecb = nelec

    # The name of the FCIDUMP file, default is "FCIDUMP".
    integralFile = os.path.join(DMRGCI.runtimeDir, DMRGCI.integralFile)

    # BUGFIX: ``DMRGCI.orbsym is not []`` is an identity comparison against
    # a fresh list and is always True; test for emptiness instead.
    if DMRGCI.groupname is not None and len(DMRGCI.orbsym) > 0:
        # First removing the symmetry forbidden integrals. This has been done
        # using the pyscf internal irrep-IDs (stored in DMRGCI.orbsym);
        # mod 10 strips the Dooh/Coov extra label so XOR is the irrep product.
        orbsym = numpy.asarray(DMRGCI.orbsym) % 10
        pair_irrep = (orbsym.reshape(-1, 1) ^ orbsym)[numpy.tril_indices(ncas)]
        sym_forbid = pair_irrep.reshape(-1, 1) != pair_irrep.ravel()
        eri_cas = ao2mo.restore(4, eri_cas, ncas)
        eri_cas[sym_forbid] = 0
        eri_cas = ao2mo.restore(8, eri_cas, ncas)
        # Then convert the pyscf internal irrep-ID to the Molpro irrep-ID.
        orbsym = numpy.asarray(
            dmrg_sym.convert_orbsym(DMRGCI.groupname, orbsym))
    else:
        orbsym = []
        eri_cas = ao2mo.restore(8, eri_cas, ncas)

    # Checking os.path.exists would see node0 only while the scratch can be
    # missing on node1 etc. (the previous wavefunction could have been copied
    # to node0), so create it on every node through mpiprefix instead.
    cmd = ' '.join((DMRGCI.mpiprefix, "mkdir -p", DMRGCI.scratchDirectory))
    check_call(cmd, shell=True)
    if not os.path.exists(DMRGCI.runtimeDir):
        os.makedirs(DMRGCI.runtimeDir)

    tools.fcidump.from_integrals(integralFile, h1eff, eri_cas, ncas,
                                 neleca + nelecb, ecore,
                                 ms=abs(neleca - nelecb), orbsym=orbsym)
    return integralFile
def writeIntegralFile(DMRGCI, h1eff, eri_cas, ncas, nelec, ecore=0):
    """Write the FCIDUMP file for the DMRG solver.

    Generalized (backward compatible): accepts an optional core energy
    ``ecore`` (default 0, the value previously implied) that is forwarded
    to the FCIDUMP header, matching the other variants of this function.
    """
    if isinstance(nelec, (int, numpy.integer)):
        neleca = nelec // 2 + nelec % 2
        nelecb = nelec - neleca
    else:
        neleca, nelecb = nelec

    integralFile = os.path.join(DMRGCI.runtimeDir, DMRGCI.integralFile)

    if DMRGCI.groupname is not None and DMRGCI.orbsym:
        # Convert pyscf internal irrep IDs to the Molpro IDs used by FCIDUMP.
        orbsym = dmrg_sym.convert_orbsym(DMRGCI.groupname, DMRGCI.orbsym)
    else:
        orbsym = []

    if not os.path.exists(DMRGCI.scratchDirectory):
        os.makedirs(DMRGCI.scratchDirectory)

    # Repack the ERIs with 8-fold permutation symmetry for the dump.
    eri_cas = pyscf.ao2mo.restore(8, eri_cas, ncas)
    pyscf.tools.fcidump.from_integrals(integralFile, h1eff, eri_cas, ncas,
                                       neleca + nelecb, ecore,
                                       ms=abs(neleca - nelecb),
                                       orbsym=orbsym)
def nevpt_integral_mpi(mc_chkfile,blockfile,dmrginp,dmrgout,scratch):
    """MPI driver for MPS-NEVPT2 perturbation integrals.

    Rank 0 loads the CASCI/CASSCF data from ``mc_chkfile`` and broadcasts
    it; the non-active (core + virtual) orbitals are then partitioned over
    the ranks, each rank writes its partial FCIDUMP in a private scratch
    directory, runs Block (``blockfile``) in restart_mps_nevpt mode and the
    per-rank Vr/Vi energies and norms are summed on rank 0 into an HDF5
    file ``Perturbation_<root>``.

    NOTE(review): every rank must reach every collective call (bcast,
    barrier, gather) — the statement order below must not be changed.
    """
    from mpi4py import MPI
    comm = MPI.COMM_WORLD
    mpi_size = MPI.COMM_WORLD.Get_size()
    rank = comm.Get_rank()

    # --- Rank 0 reads everything from the checkpoint file ---
    if rank == 0:
        mol = chkfile.load_mol(mc_chkfile)
        fh5 = h5py.File(mc_chkfile,'r')
        mo_coeff  = fh5['mc/mo'].value
        ncore     = fh5['mc/ncore'].value
        ncas      = fh5['mc/ncas'].value
        nvirt     = fh5['mc/nvirt'].value
        orbe      = fh5['mc/orbe'].value
        root      = fh5['mc/root'].value
        orbsym    = list(fh5['mc/orbsym'].value)
        nelecas   = fh5['mc/nelecas'].value
        h1e_Si    = fh5['h1e_Si'].value
        h1e_Sr    = fh5['h1e_Sr'].value
        h1e       = fh5['h1e'].value
        e_core    = fh5['e_core'].value
        h2e       = fh5['h2e'].value
        h2e_Si    = fh5['h2e_Si'].value
        h2e_Sr    = fh5['h2e_Sr'].value
        fh5.close()
        headnode  = MPI.Get_processor_name()
    else:
        # Placeholders on the other ranks; filled by the broadcasts below.
        mol = None
        mo_coeff = None
        ncore = None
        ncas = None
        nvirt = None
        orbe = None
        root = None
        orbsym = None
        nelecas = None
        h1e_Si = None
        h1e_Sr = None
        h1e = None
        e_core = None
        h2e = None
        h2e_Si = None
        h2e_Sr = None
        headnode = None
    comm.barrier()
    # --- Broadcast the checkpoint contents to all ranks ---
    mol = comm.bcast(mol,root=0)
    mo_coeff = comm.bcast(mo_coeff,root=0)
    ncas = comm.bcast(ncas,root=0)
    ncore = comm.bcast(ncore,root=0)
    nvirt = comm.bcast(nvirt,root=0)
    root = comm.bcast(root,root=0)
    orbsym = comm.bcast(orbsym,root=0)
    nelecas = comm.bcast(nelecas,root=0)
    orbe = comm.bcast(orbe,root=0)
    h1e_Si = comm.bcast(h1e_Si,root=0)
    h1e_Sr = comm.bcast(h1e_Sr,root=0)
    h1e = comm.bcast(h1e,root=0)
    h2e = comm.bcast(h2e,root=0)
    h2e_Si = comm.bcast(h2e_Si,root=0)
    h2e_Sr = comm.bcast(h2e_Sr,root=0)
    headnode = comm.bcast(headnode,root=0)
    e_core = comm.bcast(e_core,root=0)

    mo_core = mo_coeff[:,:ncore]
    mo_cas = mo_coeff[:,ncore:ncore+ncas]
    mo_virt = mo_coeff[:,ncore+ncas:]
    nelec = nelecas[0] + nelecas[1]

    # Reorder orbsym to active-first (active, core, virtual) to match the
    # FCIDUMP orbital ordering, then convert to Molpro irrep IDs.
    if mol.symmetry and len(orbsym):
        orbsym = orbsym[ncore:ncore+ncas] + orbsym[:ncore] + orbsym[ncore+ncas:]
        orbsym = dmrg_sym.convert_orbsym(mol.groupname, orbsym)
    else:
        orbsym = [1] * (ncore+ncas+nvirt)

    # --- Partition the ncore+nvirt non-active orbitals over the ranks ---
    partial_size = int(math.floor((ncore+nvirt)/float(mpi_size)))
    num_of_orb_begin = min(rank*partial_size, ncore+nvirt)
    num_of_orb_end = min((rank+1)*partial_size, ncore+nvirt)
    # Adjust the distribution of the non-active orbitals so that one
    # processor has at most one more orbital than average.
    if rank < (ncore+nvirt - partial_size*mpi_size):
        num_of_orb_begin += rank
        num_of_orb_end += rank + 1
    else :
        num_of_orb_begin += ncore+nvirt - partial_size*mpi_size
        num_of_orb_end += ncore+nvirt - partial_size*mpi_size
    # Slice the Si (core) / Sr (virtual) blocks down to this rank's share.
    if num_of_orb_begin < ncore:
        if num_of_orb_end < ncore:
            # Entire share lies in the core space.
            h1e_Si = h1e_Si[:,num_of_orb_begin:num_of_orb_end]
            h2e_Si = h2e_Si[:,num_of_orb_begin:num_of_orb_end,:,:]
            h1e_Sr = []
            h2e_Sr = []
        # elif num_of_orb_end > ncore + nvirt :
        #     h1e_Si = h1e_Si[:,num_of_orb_begin:]
        #     h2e_Si = h2e_Si[:,num_of_orb_begin:,:,:]
        #     #h2e_Sr = []
        #     orbsym = orbsym[:ncas] + orbsym[num_of_orb_begin:]
        #     norb = ncas + ncore + nvirt - num_of_orb_begin
        else :
            # Share straddles the core/virtual boundary.
            h1e_Si = h1e_Si[:,num_of_orb_begin:]
            h2e_Si = h2e_Si[:,num_of_orb_begin:,:,:]
            h1e_Sr = h1e_Sr[:num_of_orb_end - ncore,:]
            h2e_Sr = h2e_Sr[:num_of_orb_end - ncore,:,:,:]
    elif num_of_orb_begin < ncore + nvirt :
        if num_of_orb_end <= ncore + nvirt:
            # Entire share lies in the virtual space.
            h1e_Si = []
            h2e_Si = []
            h1e_Sr = h1e_Sr[num_of_orb_begin - ncore:num_of_orb_end - ncore,:]
            h2e_Sr = h2e_Sr[num_of_orb_begin - ncore:num_of_orb_end - ncore,:,:,:]
        # else :
        #     h1e_Si = []
        #     h2e_Si = []
        #     h1e_Sr = h1e_Sr[num_of_orb_begin - ncore:,:]
        #     h2e_Sr = h2e_Sr[num_of_orb_begin - ncore:,:,:,:]
        #     orbsym = orbsym[:ncas] + orbsym[ncas+num_of_orb_begin: ]
        #     norb = ncas + ncore + nvirt - num_of_orb_begin
    else :
        raise RuntimeError('No job for this processor. It may block MPI.COMM_WORLD.barrier')

    norb = ncas + num_of_orb_end - num_of_orb_begin
    orbsym = orbsym[:ncas] + orbsym[ncas + num_of_orb_begin:ncas + num_of_orb_end]
    # Count how many of this rank's orbitals are core vs virtual.
    if num_of_orb_begin >= ncore:
        partial_core = 0
        partial_virt = num_of_orb_end - num_of_orb_begin
    else:
        if num_of_orb_end >= ncore:
            partial_core = ncore -num_of_orb_begin
            partial_virt = num_of_orb_end - ncore
        else:
            partial_core = num_of_orb_end -num_of_orb_begin
            partial_virt = 0

    # --- Per-rank scratch directory with a copy of the Block input file ---
    newscratch = os.path.join(os.path.abspath(scratch), str(rank))
    if not os.path.exists('%s'%newscratch):
        os.makedirs('%s'%newscratch)
        os.makedirs('%s/node0'%newscratch)
    nevptinp = os.path.join(newscratch, os.path.basename(dmrginp))
    subprocess.check_call('cp %s %s'%(dmrginp,nevptinp),shell=True)
    # Append the NEVPT restart directive with this rank's orbital counts.
    f = open(nevptinp, 'a')
    f.write('restart_mps_nevpt %d %d %d \n'%(ncas,partial_core, partial_virt))
    f.close()
    tol = float(1e-15)
    #from subprocess import Popen
    #from subprocess import PIPE
    #print 'scratch', scratch
    ##p1 = Popen(['cp %s/* %d/'%(scratch, rank)],shell=True,stderr=PIPE)
    #p1 = Popen(['cp','%s/*'%scratch, '%d/'%rank],shell=True,stderr=PIPE)
    #print p1.communicate()
    #p2 = Popen(['cp %s/node0/* %d'%(scratch, rank)],shell=True,stderr=PIPE)
    ##p2 = Popen(['cp','%s/node0/*'%scratch, '%d/'%rank],shell=True,stderr=PIPE)
    #print p2.communicate()
    #call('cp %s/* %d/'%(scratch,rank),shell = True,stderr=os.devnull)
    #call('cp %s/node0/* %d/'%(scratch,rank),shell = True,stderr=os.devnull)
    # f1 =open(os.devnull,'w')
    # if MPI.Get_processor_name() == headnode:
    #     subprocess.call('cp %s/* %s/'%(scratch,newscratch),stderr=f1,shell = True)
    #     subprocess.call('cp %s/node0/* %s/node0'%(scratch,newscratch),shell = True)
    # else:
    #     subprocess.call('scp %s:%s/* %s/'%(headnode,scratch,newscratch),stderr=f1,shell = True)
    #     subprocess.call('scp %s:%s/node0/* %s/node0'%(headnode,scratch,newscratch),shell = True)
    # f1.close()
    #TODO
    #Use mpi rather than scp to copy the file.
    #To make the code robust.

    # --- Replicate the restart files from node0 scratch to every rank ---
    # (rank 0 reads each file and broadcasts its raw bytes).
    if rank==0:
        filenames = []
        for fn in os.listdir('%s/node0'%scratch):
            if fn== 'dmrg.e' or fn== 'statefile.0.tmp' or fn== 'RestartReorder.dat' or fn.startswith('wave') or fn.startswith('Rotation'):
                filenames.append(fn)
    else:
        filenames = None
    filenames = comm.bcast(filenames, root=0)
    for i in range(len(filenames)):
        if rank == 0:
            with open('%s/node0/%s'%(scratch,filenames[i]),'rb') as f:
                data = f.read()
        else:
            data = None
        data = comm.bcast(data, root=0)
        if data==None:
            print 'empty file'
        with open('%s/node0/%s'%(newscratch,filenames[i]),'wb') as f:
            f.write(data)

    # --- Write this rank's partial FCIDUMP ---
    f = open('%s/FCIDUMP'%newscratch,'w')
    pyscf.tools.fcidump.write_head(f,norb, nelec, ms=abs(nelecas[0]-nelecas[1]), orbsym=orbsym)
    #h2e in active space
    writeh2e_sym(h2e,f,tol)
    #h1e in active space
    writeh1e_sym(h1e,f,tol)
    # Orbital energies of this rank's non-active orbitals (active removed).
    orbe =list(orbe[:ncore]) + list(orbe[ncore+ncas:])
    orbe = orbe[num_of_orb_begin:num_of_orb_end]
    for i in range(len(orbe)):
        f.write('% .16f %4d %4d %4d %4d\n'%(orbe[i],i+1+ncas,i+1+ncas,0,0))
    f.write('%.16f %4d %4d %4d %4d\n'%(e_core,0,0,0,0))
    # Sr/Si coupling blocks, each terminated by an all-zero sentinel line.
    if (len(h2e_Sr)):
        writeh2e(h2e_Sr,f,tol, shift0 = ncas + partial_core+1)
    f.write('% 4d %4d %4d %4d %4d\n'%(0,0,0,0,0))
    if (len(h2e_Si)):
        writeh2e(h2e_Si,f,tol, shift1 = ncas+1)
    f.write('% 4d %4d %4d %4d %4d\n'%(0,0,0,0,0))
    if (len(h1e_Sr)):
        writeh1e(h1e_Sr,f,tol, shift0 = ncas + partial_core+1)
    f.write('% 4d %4d %4d %4d %4d\n'%(0,0,0,0,0))
    if (len(h1e_Si)):
        writeh1e(h1e_Si,f,tol, shift1 = ncas+1)
    f.write('% 4d %4d %4d %4d %4d\n'%(0,0,0,0,0))
    f.write('% 4d %4d %4d %4d %4d\n'%(0,0,0,0,0))
    f.close()

    # --- Run Block in the per-rank scratch directory ---
    current_path = os.getcwd()
    os.chdir('%s'%newscratch)
    env = os.environ
    envnew = {}
    for k in env:
        # Remove PBS/SLURM/MPI environment variables to prevent Block from
        # starting in MPI mode inside each rank.
        if 'MPI' not in k and 'SLURM' not in k:
            envnew[k] = os.environ[k]
    p = subprocess.Popen(['%s %s > %s'%(blockfile,nevptinp,dmrgout)], env=envnew, shell=True)
    p.wait()
    # Read the real (Va_) and imaginary (Vi_) perturbation results.
    f = open('node0/Va_%d'%root,'r')
    Vr_energy = float(f.readline())
    Vr_norm = float(f.readline())
    f.close()
    f = open('node0/Vi_%d'%root,'r')
    Vi_energy = float(f.readline())
    Vi_norm = float(f.readline())
    f.close()
    comm.barrier()
    #Vr_total = 0.0
    #Vi_total = 0.0
    # --- Reduce the per-rank contributions onto rank 0 ---
    Vi_total_e = comm.gather(Vi_energy,root=0)
    Vi_total_norm = comm.gather(Vi_norm,root=0)
    Vr_total_e = comm.gather(Vr_energy,root=0)
    Vr_total_norm = comm.gather(Vr_norm,root=0)
    #comm.Reduce(Vi_energy,Vi_total,op=MPI.SUM, root=0)
    os.chdir('%s'%current_path)
    if rank == 0:
        fh5 = h5py.File('Perturbation_%d'%root,'w')
        fh5['Vi/energy'] = sum(Vi_total_e)
        fh5['Vi/norm'] = sum(Vi_total_norm)
        fh5['Vr/energy'] = sum(Vr_total_e)
        fh5['Vr/norm'] = sum(Vr_total_norm)
        fh5.close()
def writeIntegralFile(SHCI, h1eff, eri_cas, norb, nelec, ecore=0):
    """Write the integral file for the SHCI (Dice) solver.

    For Dooh/Coov with ``SHCI.useExtraSymm`` the real D2h-adapted integrals
    are rotated into the complex Dinfh (Lz-adapted) basis and written with
    ``writeIntNoSymm``; otherwise a standard FCIDUMP is produced via
    ``fcidumpFromIntegral``.  For Dooh the symmetry labels in the dump are
    subsequently corrected by the external Lz relabelling tool.
    """
    if isinstance(nelec, (int, numpy.integer)):
        neleca = nelec // 2 + nelec % 2
        nelecb = nelec - neleca
    else:
        neleca, nelecb = nelec

    # The name of the FCIDUMP file, default is "FCIDUMP".
    integralFile = os.path.join(SHCI.runtimeDir, SHCI.integralFile)

    from pyscf import symm
    from pyscf.dmrgscf import dmrg_sym
    if (SHCI.groupname == 'Dooh'
            or SHCI.groupname == 'Coov') and SHCI.useExtraSymm:
        # Rotate h1eff into the complex Dinfh orbital basis; only the real
        # part survives.
        coeffs, nRows, rowIndex, rowCoeffs, orbsym = D2htoDinfh(
            SHCI, norb, nelec)
        newintt = numpy.tensordot(coeffs.conj(), h1eff, axes=([1], [0]))
        newint1 = numpy.tensordot(newintt, coeffs, axes=([1], [1]))
        newint1r = numpy.zeros(shape=(norb, norb), order='C')
        for i in range(norb):
            for j in range(norb):
                newint1r[i, j] = newint1[i, j].real
        int2 = pyscf.ao2mo.restore(1, eri_cas, norb)
        eri_cas = numpy.zeros_like(int2)
        # transformDinfh fills eri_cas in place from int2.
        transformDinfh(norb, numpy.ascontiguousarray(nRows, numpy.int32),
                       numpy.ascontiguousarray(rowIndex, numpy.int32),
                       numpy.ascontiguousarray(rowCoeffs, numpy.float64),
                       numpy.ascontiguousarray(int2, numpy.float64),
                       numpy.ascontiguousarray(eri_cas, numpy.float64))
        writeIntNoSymm(norb,
                       numpy.ascontiguousarray(newint1r, numpy.float64),
                       numpy.ascontiguousarray(eri_cas, numpy.float64),
                       ecore, neleca + nelecb,
                       numpy.asarray(orbsym, dtype=numpy.int32))
    else:
        # BUGFIX: ``SHCI.orbsym is not []`` was an identity test against a
        # fresh list and always True; test for emptiness instead.
        if SHCI.groupname is not None and len(SHCI.orbsym) > 0:
            orbsym = dmrg_sym.convert_orbsym(SHCI.groupname, SHCI.orbsym)
        else:
            orbsym = [1] * norb
        eri_cas = pyscf.ao2mo.restore(8, eri_cas, norb)
        # Writes the FCIDUMP file using functions in SHCI_tools.cpp.
        # Pass bytes for python3 compatibility without rebinding
        # integralFile, which is still needed as str below.
        fcidumpFromIntegral(integralFile.encode(), h1eff, eri_cas, norb,
                            neleca + nelecb, ecore,
                            numpy.asarray(orbsym, dtype=numpy.int32),
                            abs(neleca - nelecb))

    # Fix possible errors in symmetry labels: relabel the FCIDUMP with the
    # external Lz tool (fortran program) and read the corrected orbital
    # symmetries back.
    if SHCI.groupname == 'Dooh' and SHCI.useExtraSymm:
        check_call(SHCI.Lz_relabel + ' ' + integralFile + ' ' +
                   SHCI.pyscf_home, shell=True)
        orbsym = getorbsymfromFCIDUMP(norb)
def writeSHCIConfFile(SHCI, nelec, Restart):
    """Write the SHCI (Dice) input/configuration file.

    Args:
        SHCI: solver object carrying the configuration attributes.
        nelec: (nalpha, nbeta) electron counts.
        Restart: when True, emit a one-step restart schedule instead of
            the full variational sweep schedule.
    """
    confFile = os.path.join(SHCI.runtimeDir, SHCI.configFile)
    f = open(confFile, 'w')

    # --- Reference determinant section ---
    f.write('nocc %i\n' % (nelec[0] + nelec[1]))
    if SHCI.__class__.__name__ == 'FakeCISolver':
        # Aufbau filling: alpha spin-orbitals are even indices, beta odd.
        for i in range(nelec[0]):
            f.write('%i ' % (2 * i))
        for i in range(nelec[1]):
            f.write('%i ' % (2 * i + 1))
    else:
        if SHCI.initialStates is not None:
            # Explicit user-supplied reference determinants, one per line.
            for i in range(len(SHCI.initialStates)):
                for j in SHCI.initialStates[i]:
                    f.write('%i ' % (j))
                if (i != len(SHCI.initialStates) - 1):
                    f.write('\n')
        elif SHCI.irrep_nelec is None:
            for i in range(int(nelec[0])):
                f.write('%i ' % (2 * i))
            for i in range(int(nelec[1])):
                f.write('%i ' % (2 * i + 1))
        else:
            # Occupation constrained per irrep via SHCI.irrep_nelec.
            from pyscf import symm
            from pyscf.dmrgscf import dmrg_sym
            from pyscf.symm.basis import DOOH_IRREP_ID_TABLE
            # BUGFIX: the original condition ``SHCI.orbsym is not []`` was
            # always True (identity test against a fresh list) and its
            # fallback referenced an undefined name ``norb`` (NameError);
            # fail with a clear message instead.
            if SHCI.groupname is not None and len(SHCI.orbsym) > 0:
                orbsym = dmrg_sym.convert_orbsym(SHCI.groupname, SHCI.orbsym)
            else:
                raise RuntimeError('SHCI.irrep_nelec requires symmetry '
                                   'information (SHCI.groupname and '
                                   'SHCI.orbsym) to be set')
            done = []
            for k, v in SHCI.irrep_nelec.items():
                irrep, nalpha, nbeta = [dmrg_sym.irrep_name2id(SHCI.groupname, k)],\
                    v[0], v[1]
                # Fill alpha (2*i) and beta (2*i+1) spin-orbitals of this
                # irrep until the requested counts are exhausted.
                for i in range(len(orbsym)):
                    if (orbsym[i] == irrep[0] and nalpha != 0
                            and i * 2 not in done):
                        done.append(i * 2)
                        f.write('%i ' % (i * 2))
                        nalpha -= 1
                    if (orbsym[i] == irrep[0] and nbeta != 0
                            and i * 2 + 1 not in done):
                        done.append(i * 2 + 1)
                        f.write('%i ' % (i * 2 + 1))
                        nbeta -= 1
                # Not enough orbitals of this irrep in the active space.
                if (nalpha != 0):
                    print("number of irreps %s in active space = %d" %
                          (k, v[0] - nalpha))
                    print("number of irreps %s alpha electrons = %d" %
                          (k, v[0]))
                    exit(1)
                if (nbeta != 0):
                    print("number of irreps %s in active space = %d" %
                          (k, v[1] - nbeta))
                    print("number of irreps %s beta electrons = %d" %
                          (k, v[1]))
                    exit(1)
    f.write('\nend\n')
    f.write('nroots %r\n' % SHCI.nroots)

    # --- Variational keyword section ---
    if (not Restart):
        schedStr = make_sched(SHCI)
        f.write(schedStr)
    else:
        # Restart: single schedule step at the final epsilon.
        f.write('schedule\n')
        f.write('%d %g\n' % (0, SHCI.sweep_epsilon[-1]))
        f.write('end\n')
    f.write('davidsonTol %g\n' % SHCI.davidsonTol)
    f.write('dE %g\n' % SHCI.dE)
    if (SHCI.DoRDM):
        f.write('DoRDM\n')

    # --- Perturbative keyword section ---
    if (SHCI.stochastic == False):
        f.write('deterministic \n')
    else:
        f.write('nPTiter %d\n' % SHCI.nPTiter)
    f.write('epsilon2 %g\n' % SHCI.epsilon2)
    f.write('epsilon2Large %g\n' % SHCI.epsilon2Large)
    f.write('targetError %g\n' % SHCI.targetError)
    f.write('sampleN %i\n' % SHCI.sampleN)

    # --- Miscellaneous keywords ---
    f.write('noio \n')
    if (SHCI.prefix != ""):
        if not os.path.exists(SHCI.prefix):
            os.makedirs(SHCI.prefix)
        f.write('prefix %s\n' % (SHCI.prefix))
    # maxiter is 6 more than the last sweep iteration; restarted runs get
    # a fixed 6 iterations plus a full restart.
    if (not Restart):
        f.write('maxiter %i\n' % (SHCI.sweep_iter[-1] + 6))
    else:
        f.write('maxiter 6\n')
        f.write('fullrestart\n')
    f.write('\n')  # SHCI requires that there is an extra line.
    f.write('%s\n' % (SHCI.extraline))
    f.close()
def _write_integral_file(mc_chkfile, nevpt_scratch, comm):
    """Write this rank's partial NEVPT2 FCIDUMP into ``nevpt_scratch``.

    Rank 0 reads each dataset from the checkpoint and broadcasts it; the
    non-active (core + virtual) orbitals are partitioned over the MPI
    ranks and only this rank's slice of the Si/Sr coupling blocks is
    written.

    Returns:
        (ncas, partial_core, partial_virt) — the active size and this
        rank's share of core and virtual orbitals.

    NOTE(review): ``load`` performs a collective bcast on every call, so
    all ranks must execute the same sequence of load() calls.
    """
    mpi_size = comm.Get_size()
    rank = comm.Get_rank()
    if rank == 0:
        fh5 = h5py.File(mc_chkfile, 'r')

        # Rank 0 reads the dataset (or [] if absent) and broadcasts it.
        def load(key):
            if key in fh5:
                return comm.bcast(fh5[key][()])
            else:
                return comm.bcast([])
    else:
        # Other ranks only participate in the broadcast.
        def load(key):
            return comm.bcast(None)

    mol = gto.loads(load('mol'))
    ncore = load('mc/ncore')
    ncas = load('mc/ncas')
    nvirt = load('mc/nvirt')
    orbe = load('mc/orbe')
    orbsym = list(load('mc/orbsym'))
    nelecas = load('mc/nelecas')
    h1e_Si = load('h1e_Si')
    h1e_Sr = load('h1e_Sr')
    h1e = load('h1e')
    e_core = load('e_core')
    h2e = load('h2e')
    h2e_Si = load('h2e_Si')
    h2e_Sr = load('h2e_Sr')
    if rank == 0:
        fh5.close()

    # Reorder orbsym to active-first (active, core, virtual) to match the
    # FCIDUMP orbital ordering, then convert to Molpro irrep IDs.
    if mol.symmetry and len(orbsym) > 0:
        orbsym = orbsym[ncore:ncore + ncas] + orbsym[:ncore] + orbsym[ncore + ncas:]
        orbsym = dmrg_sym.convert_orbsym(mol.groupname, orbsym)
    else:
        orbsym = [1] * (ncore + ncas + nvirt)

    # --- Partition the ncore+nvirt non-active orbitals over the ranks ---
    partial_size = int(math.floor((ncore + nvirt) / float(mpi_size)))
    num_of_orb_begin = min(rank * partial_size, ncore + nvirt)
    num_of_orb_end = min((rank + 1) * partial_size, ncore + nvirt)
    # Adjust the distribution of the non-active orbitals so that one
    # processor has at most one more orbital than average.
    if rank < (ncore + nvirt - partial_size * mpi_size):
        num_of_orb_begin += rank
        num_of_orb_end += rank + 1
    else:
        num_of_orb_begin += ncore + nvirt - partial_size * mpi_size
        num_of_orb_end += ncore + nvirt - partial_size * mpi_size
    # Slice the Si (core) / Sr (virtual) blocks down to this rank's share.
    if num_of_orb_begin < ncore:
        if num_of_orb_end < ncore:
            # Entire share lies in the core space.
            h1e_Si = h1e_Si[:, num_of_orb_begin:num_of_orb_end]
            h2e_Si = h2e_Si[:, num_of_orb_begin:num_of_orb_end, :, :]
            h1e_Sr = []
            h2e_Sr = []
        # elif num_of_orb_end > ncore + nvirt :
        #     h1e_Si = h1e_Si[:,num_of_orb_begin:]
        #     h2e_Si = h2e_Si[:,num_of_orb_begin:,:,:]
        #     #h2e_Sr = []
        #     orbsym = orbsym[:ncas] + orbsym[num_of_orb_begin:]
        #     norb = ncas + ncore + nvirt - num_of_orb_begin
        else:
            # Share straddles the core/virtual boundary.
            h1e_Si = h1e_Si[:, num_of_orb_begin:]
            h2e_Si = h2e_Si[:, num_of_orb_begin:, :, :]
            h1e_Sr = h1e_Sr[:num_of_orb_end - ncore, :]
            h2e_Sr = h2e_Sr[:num_of_orb_end - ncore, :, :, :]
    elif num_of_orb_begin < ncore + nvirt:
        if num_of_orb_end <= ncore + nvirt:
            # Entire share lies in the virtual space.
            h1e_Si = []
            h2e_Si = []
            h1e_Sr = h1e_Sr[num_of_orb_begin - ncore:num_of_orb_end - ncore, :]
            h2e_Sr = h2e_Sr[num_of_orb_begin - ncore:num_of_orb_end - ncore, :, :, :]
        # else :
        #     h1e_Si = []
        #     h2e_Si = []
        #     h1e_Sr = h1e_Sr[num_of_orb_begin - ncore:,:]
        #     h2e_Sr = h2e_Sr[num_of_orb_begin - ncore:,:,:,:]
        #     orbsym = orbsym[:ncas] + orbsym[ncas+num_of_orb_begin: ]
        #     norb = ncas + ncore + nvirt - num_of_orb_begin
    else:
        raise RuntimeError(
            'No job for this processor. It may block MPI.COMM_WORLD.barrier')

    norb = ncas + num_of_orb_end - num_of_orb_begin
    orbsym = orbsym[:ncas] + orbsym[ncas + num_of_orb_begin:ncas + num_of_orb_end]
    # Count how many of this rank's orbitals are core vs virtual.
    if num_of_orb_begin >= ncore:
        partial_core = 0
        partial_virt = num_of_orb_end - num_of_orb_begin
    else:
        if num_of_orb_end >= ncore:
            partial_core = ncore - num_of_orb_begin
            partial_virt = num_of_orb_end - ncore
        else:
            partial_core = num_of_orb_end - num_of_orb_begin
            partial_virt = 0

    # --- Write this rank's partial FCIDUMP ---
    tol = float(1e-15)
    f = open(os.path.join(nevpt_scratch, 'FCIDUMP'), 'w')
    nelec = nelecas[0] + nelecas[1]
    fcidump.write_head(f, norb, nelec, ms=abs(nelecas[0] - nelecas[1]),
                       orbsym=orbsym)
    #h2e in active space
    writeh2e_sym(h2e, f, tol)
    #h1e in active space
    writeh1e_sym(h1e, f, tol)
    # Orbital energies of this rank's non-active orbitals (active removed).
    orbe = list(orbe[:ncore]) + list(orbe[ncore + ncas:])
    orbe = orbe[num_of_orb_begin:num_of_orb_end]
    for i in range(len(orbe)):
        f.write('% .16f %4d %4d %4d %4d\n' %
                (orbe[i], i + 1 + ncas, i + 1 + ncas, 0, 0))
    f.write('%.16f %4d %4d %4d %4d\n' % (e_core, 0, 0, 0, 0))
    # Sr/Si coupling blocks, each terminated by an all-zero sentinel line.
    if (len(h2e_Sr)):
        writeh2e(h2e_Sr, f, tol, shift0=ncas + partial_core + 1)
    f.write('% 4d %4d %4d %4d %4d\n' % (0, 0, 0, 0, 0))
    if (len(h2e_Si)):
        writeh2e(h2e_Si, f, tol, shift1=ncas + 1)
    f.write('% 4d %4d %4d %4d %4d\n' % (0, 0, 0, 0, 0))
    if (len(h1e_Sr)):
        writeh1e(h1e_Sr, f, tol, shift0=ncas + partial_core + 1)
    f.write('% 4d %4d %4d %4d %4d\n' % (0, 0, 0, 0, 0))
    if (len(h1e_Si)):
        writeh1e(h1e_Si, f, tol, shift1=ncas + 1)
    f.write('% 4d %4d %4d %4d %4d\n' % (0, 0, 0, 0, 0))
    f.write('% 4d %4d %4d %4d %4d\n' % (0, 0, 0, 0, 0))
    f.close()
    return ncas, partial_core, partial_virt
def nevpt_integral_mpi(mc_chkfile,blockfile,dmrginp,dmrgout,scratch):
    """MPI driver for MPS-NEVPT2 (variant using a relative per-rank scratch).

    Rank 0 loads the CASCI/CASSCF data from ``mc_chkfile`` and broadcasts
    it; the non-active (core + virtual) orbitals are partitioned over the
    ranks, each rank writes its partial FCIDUMP in ``scratch/<rank>``, runs
    Block (``blockfile``) in restart_mps_nevpt mode, and the per-rank Vr/Vi
    energies and norms are summed on rank 0 into ``Perturbation_<root>``.

    NOTE(review): every rank must reach every collective call (bcast,
    barrier, gather) — the statement order below must not be changed.
    """
    from mpi4py import MPI
    comm = MPI.COMM_WORLD
    mpi_size = MPI.COMM_WORLD.Get_size()
    rank = comm.Get_rank()

    # --- Rank 0 reads everything from the checkpoint file ---
    if rank == 0:
        mol = chkfile.load_mol(mc_chkfile)
        fh5 = h5py.File(mc_chkfile,'r')
        mo_coeff  = fh5['mc/mo'].value
        ncore     = fh5['mc/ncore'].value
        ncas      = fh5['mc/ncas'].value
        nvirt     = fh5['mc/nvirt'].value
        orbe      = fh5['mc/orbe'].value
        root      = fh5['mc/root'].value
        orbsym    = list(fh5['mc/orbsym'].value)
        nelecas   = fh5['mc/nelecas'].value
        h1e_Si    = fh5['h1e_Si'].value
        h1e_Sr    = fh5['h1e_Sr'].value
        h1e       = fh5['h1e'].value
        e_core    = fh5['e_core'].value
        h2e       = fh5['h2e'].value
        h2e_Si    = fh5['h2e_Si'].value
        h2e_Sr    = fh5['h2e_Sr'].value
        fh5.close()
        headnode  = MPI.Get_processor_name()
    else:
        # Placeholders on the other ranks; filled by the broadcasts below.
        mol = None
        mo_coeff = None
        ncore = None
        ncas = None
        nvirt = None
        orbe = None
        root = None
        orbsym = None
        nelecas = None
        h1e_Si = None
        h1e_Sr = None
        h1e = None
        e_core = None
        h2e = None
        h2e_Si = None
        h2e_Sr = None
        headnode = None
    comm.barrier()
    # --- Broadcast the checkpoint contents to all ranks ---
    mol = comm.bcast(mol,root=0)
    mo_coeff = comm.bcast(mo_coeff,root=0)
    ncas = comm.bcast(ncas,root=0)
    ncore = comm.bcast(ncore,root=0)
    nvirt = comm.bcast(nvirt,root=0)
    root = comm.bcast(root,root=0)
    orbsym = comm.bcast(orbsym,root=0)
    nelecas = comm.bcast(nelecas,root=0)
    orbe = comm.bcast(orbe,root=0)
    h1e_Si = comm.bcast(h1e_Si,root=0)
    h1e_Sr = comm.bcast(h1e_Sr,root=0)
    h1e = comm.bcast(h1e,root=0)
    h2e = comm.bcast(h2e,root=0)
    h2e_Si = comm.bcast(h2e_Si,root=0)
    h2e_Sr = comm.bcast(h2e_Sr,root=0)
    headnode = comm.bcast(headnode,root=0)
    e_core = comm.bcast(e_core,root=0)

    mo_core = mo_coeff[:,:ncore]
    mo_cas = mo_coeff[:,ncore:ncore+ncas]
    mo_virt = mo_coeff[:,ncore+ncas:]
    nelec = nelecas[0] + nelecas[1]

    # Reorder orbsym to active-first (active, core, virtual) to match the
    # FCIDUMP orbital ordering, then convert to Molpro irrep IDs.
    if mol.symmetry and len(orbsym):
        orbsym = orbsym[ncore:ncore+ncas] + orbsym[:ncore] + orbsym[ncore+ncas:]
        orbsym = dmrg_sym.convert_orbsym(mol.groupname, orbsym)
    else:
        orbsym = [1] * (ncore+ncas+nvirt)

    # --- Partition the ncore+nvirt non-active orbitals over the ranks ---
    partial_size = int(math.floor((ncore+nvirt)/float(mpi_size)))
    num_of_orb_begin = min(rank*partial_size, ncore+nvirt)
    num_of_orb_end = min((rank+1)*partial_size, ncore+nvirt)
    # Adjust the distribution of the non-active orbitals so that one
    # processor has at most one more orbital than average.
    if rank < (ncore+nvirt - partial_size*mpi_size):
        num_of_orb_begin += rank
        num_of_orb_end += rank + 1
    else :
        num_of_orb_begin += ncore+nvirt - partial_size*mpi_size
        num_of_orb_end += ncore+nvirt - partial_size*mpi_size
    # Slice the Si (core) / Sr (virtual) blocks down to this rank's share.
    if num_of_orb_begin < ncore:
        if num_of_orb_end < ncore:
            # Entire share lies in the core space.
            h1e_Si = h1e_Si[:,num_of_orb_begin:num_of_orb_end]
            h2e_Si = h2e_Si[:,num_of_orb_begin:num_of_orb_end,:,:]
            h1e_Sr = []
            h2e_Sr = []
        # elif num_of_orb_end > ncore + nvirt :
        #     h1e_Si = h1e_Si[:,num_of_orb_begin:]
        #     h2e_Si = h2e_Si[:,num_of_orb_begin:,:,:]
        #     #h2e_Sr = []
        #     orbsym = orbsym[:ncas] + orbsym[num_of_orb_begin:]
        #     norb = ncas + ncore + nvirt - num_of_orb_begin
        else :
            # Share straddles the core/virtual boundary.
            h1e_Si = h1e_Si[:,num_of_orb_begin:]
            h2e_Si = h2e_Si[:,num_of_orb_begin:,:,:]
            h1e_Sr = h1e_Sr[:num_of_orb_end - ncore,:]
            h2e_Sr = h2e_Sr[:num_of_orb_end - ncore,:,:,:]
    elif num_of_orb_begin < ncore + nvirt :
        if num_of_orb_end <= ncore + nvirt:
            # Entire share lies in the virtual space.
            h1e_Si = []
            h2e_Si = []
            h1e_Sr = h1e_Sr[num_of_orb_begin - ncore:num_of_orb_end - ncore,:]
            h2e_Sr = h2e_Sr[num_of_orb_begin - ncore:num_of_orb_end - ncore,:,:,:]
        # else :
        #     h1e_Si = []
        #     h2e_Si = []
        #     h1e_Sr = h1e_Sr[num_of_orb_begin - ncore:,:]
        #     h2e_Sr = h2e_Sr[num_of_orb_begin - ncore:,:,:,:]
        #     orbsym = orbsym[:ncas] + orbsym[ncas+num_of_orb_begin: ]
        #     norb = ncas + ncore + nvirt - num_of_orb_begin
    else :
        raise RuntimeError('No job for this processor. It may block MPI.COMM_WORLD.barrier')

    norb = ncas + num_of_orb_end - num_of_orb_begin
    orbsym = orbsym[:ncas] + orbsym[ncas + num_of_orb_begin:ncas + num_of_orb_end]
    # Count how many of this rank's orbitals are core vs virtual.
    if num_of_orb_begin >= ncore:
        partial_core = 0
        partial_virt = num_of_orb_end - num_of_orb_begin
    else:
        if num_of_orb_end >= ncore:
            partial_core = ncore -num_of_orb_begin
            partial_virt = num_of_orb_end - ncore
        else:
            partial_core = num_of_orb_end -num_of_orb_begin
            partial_virt = 0

    # --- Per-rank scratch directory with a copy of the Block input file ---
    newscratch = os.path.join(scratch, str(rank))
    if not os.path.exists('%s'%newscratch):
        os.makedirs('%s'%newscratch)
        os.makedirs('%s/node0'%newscratch)
    subprocess.check_call('cp %s %s/%s'%(dmrginp,newscratch,dmrginp),shell=True)
    # Append the NEVPT restart directive with this rank's orbital counts.
    f = open('%s/%s'%(newscratch,dmrginp), 'a')
    f.write('restart_mps_nevpt %d %d %d \n'%(ncas,partial_core, partial_virt))
    f.close()
    tol = float(1e-15)
    #from subprocess import Popen
    #from subprocess import PIPE
    #print 'scratch', scratch
    ##p1 = Popen(['cp %s/* %d/'%(scratch, rank)],shell=True,stderr=PIPE)
    #p1 = Popen(['cp','%s/*'%scratch, '%d/'%rank],shell=True,stderr=PIPE)
    #print p1.communicate()
    #p2 = Popen(['cp %s/node0/* %d'%(scratch, rank)],shell=True,stderr=PIPE)
    ##p2 = Popen(['cp','%s/node0/*'%scratch, '%d/'%rank],shell=True,stderr=PIPE)
    #print p2.communicate()
    #call('cp %s/* %d/'%(scratch,rank),shell = True,stderr=os.devnull)
    #call('cp %s/node0/* %d/'%(scratch,rank),shell = True,stderr=os.devnull)
    # f1 =open(os.devnull,'w')
    # if MPI.Get_processor_name() == headnode:
    #     subprocess.call('cp %s/* %s/'%(scratch,newscratch),stderr=f1,shell = True)
    #     subprocess.call('cp %s/node0/* %s/node0'%(scratch,newscratch),shell = True)
    # else:
    #     subprocess.call('scp %s:%s/* %s/'%(headnode,scratch,newscratch),stderr=f1,shell = True)
    #     subprocess.call('scp %s:%s/node0/* %s/node0'%(headnode,scratch,newscratch),shell = True)
    # f1.close()
    #TODO
    #Use mpi rather than scp to copy the file.
    #To make the code robust.

    # --- Replicate the restart files from node0 scratch to every rank ---
    # (rank 0 reads each file and broadcasts its raw bytes).
    if rank==0:
        filenames = []
        for fn in os.listdir('%s/node0'%scratch):
            if fn== 'dmrg.e' or fn== 'statefile.0.tmp' or fn== 'RestartReorder.dat' or fn.startswith('wave') or fn.startswith('Rotation'):
                filenames.append(fn)
    else:
        filenames = None
    filenames = comm.bcast(filenames, root=0)
    for i in range(len(filenames)):
        if rank == 0:
            with open('%s/node0/%s'%(scratch,filenames[i]),'rb') as f:
                data = f.read()
        else:
            data = None
        data = comm.bcast(data, root=0)
        if data==None:
            print 'empty file'
        with open('%s/node0/%s'%(newscratch,filenames[i]),'wb') as f:
            f.write(data)

    # --- Write this rank's partial FCIDUMP ---
    f = open('%s/FCIDUMP'%newscratch,'w')
    pyscf.tools.fcidump.write_head(f,norb, nelec, ms=abs(nelecas[0]-nelecas[1]), orbsym=orbsym)
    #h2e in active space
    writeh2e_sym(h2e,f,tol)
    #h1e in active space
    writeh1e_sym(h1e,f,tol)
    # Orbital energies of this rank's non-active orbitals (active removed).
    orbe =list(orbe[:ncore]) + list(orbe[ncore+ncas:])
    orbe = orbe[num_of_orb_begin:num_of_orb_end]
    for i in range(len(orbe)):
        f.write('% .16f %4d %4d %4d %4d\n'%(orbe[i],i+1+ncas,i+1+ncas,0,0))
    f.write('%.16f %4d %4d %4d %4d\n'%(e_core,0,0,0,0))
    # Sr/Si coupling blocks, each terminated by an all-zero sentinel line.
    if (len(h2e_Sr)):
        writeh2e(h2e_Sr,f,tol, shift0 = ncas + partial_core+1)
    f.write('% 4d %4d %4d %4d %4d\n'%(0,0,0,0,0))
    if (len(h2e_Si)):
        writeh2e(h2e_Si,f,tol, shift1 = ncas+1)
    f.write('% 4d %4d %4d %4d %4d\n'%(0,0,0,0,0))
    if (len(h1e_Sr)):
        writeh1e(h1e_Sr,f,tol, shift0 = ncas + partial_core+1)
    f.write('% 4d %4d %4d %4d %4d\n'%(0,0,0,0,0))
    if (len(h1e_Si)):
        writeh1e(h1e_Si,f,tol, shift1 = ncas+1)
    f.write('% 4d %4d %4d %4d %4d\n'%(0,0,0,0,0))
    f.write('% 4d %4d %4d %4d %4d\n'%(0,0,0,0,0))
    f.close()

    # --- Run Block in the per-rank scratch directory ---
    current_path = os.getcwd()
    os.chdir('%s'%newscratch)
    env = os.environ
    envnew = {}
    for k in env:
        # Remove PBS/SLURM/MPI environment variables to prevent Block from
        # starting in MPI mode inside each rank.
        if 'MPI' not in k and 'SLURM' not in k:
            envnew[k] = os.environ[k]
    p = subprocess.Popen(['%s %s > %s'%(blockfile,dmrginp,dmrgout)], env=envnew, shell=True)
    p.wait()
    # Read the real (Va_) and imaginary (Vi_) perturbation results.
    f = open('node0/Va_%d'%root,'r')
    Vr_energy = float(f.readline())
    Vr_norm = float(f.readline())
    f.close()
    f = open('node0/Vi_%d'%root,'r')
    Vi_energy = float(f.readline())
    Vi_norm = float(f.readline())
    f.close()
    comm.barrier()
    #Vr_total = 0.0
    #Vi_total = 0.0
    # --- Reduce the per-rank contributions onto rank 0 ---
    Vi_total_e = comm.gather(Vi_energy,root=0)
    Vi_total_norm = comm.gather(Vi_norm,root=0)
    Vr_total_e = comm.gather(Vr_energy,root=0)
    Vr_total_norm = comm.gather(Vr_norm,root=0)
    #comm.Reduce(Vi_energy,Vi_total,op=MPI.SUM, root=0)
    os.chdir('%s'%current_path)
    if rank == 0:
        fh5 = h5py.File('Perturbation_%d'%root,'w')
        fh5['Vi/energy'] = sum(Vi_total_e)
        fh5['Vi/norm'] = sum(Vi_total_norm)
        fh5['Vr/energy'] = sum(Vr_total_e)
        fh5['Vr/norm'] = sum(Vr_total_norm)
        fh5.close()
def _write_integral_file(mc_chkfile, nevpt_scratch, comm):
    """Write this MPI rank's partial FCIDUMP file for the NEVPT2 step.

    Rank 0 reads the CASCI/CASSCF intermediates from the HDF5 checkpoint
    ``mc_chkfile`` and broadcasts every dataset to all ranks.  The non-active
    (core + virtual) orbitals are then partitioned across the ranks, the
    ``h1e_Si/h2e_Si`` (core-axis) and ``h1e_Sr/h2e_Sr`` (virtual-axis)
    integral blocks are sliced down to this rank's orbitals, and the result
    is written to ``<nevpt_scratch>/FCIDUMP``.

    Args:
        mc_chkfile: path to the HDF5 file holding mol/mc/h1e/h2e datasets.
        nevpt_scratch: directory in which this rank's FCIDUMP is created.
        comm: MPI communicator (mpi4py-style; Get_size/Get_rank/bcast used).

    Returns:
        (ncas, partial_core, partial_virt) -- active-space size plus the
        number of core and virtual orbitals assigned to this rank.

    Raises:
        RuntimeError: if this rank receives no orbitals at all (which would
            otherwise deadlock later collective calls).
    """
    mpi_size = comm.Get_size()
    rank = comm.Get_rank()
    # Only rank 0 opens the HDF5 file.  Every load() is a collective bcast,
    # so all ranks must execute the same sequence of load() calls in the
    # same order.
    if rank == 0:
        fh5 = h5py.File(mc_chkfile, 'r')
        def load(key):
            if key in fh5:
                # NOTE(review): Dataset.value is the legacy (pre-h5py-3.0)
                # accessor; modern h5py would need fh5[key][()] -- confirm
                # the pinned h5py version before touching this.
                return comm.bcast(fh5[key].value)
            else:
                # Missing dataset: broadcast an empty placeholder so the
                # length checks below simply skip the section.
                return comm.bcast([])
    else:
        def load(key):
            # Non-root ranks only receive whatever rank 0 broadcasts.
            return comm.bcast(None)
    mol = gto.loads(load('mol'))
    ncore = load('mc/ncore')
    ncas = load('mc/ncas')
    nvirt = load('mc/nvirt')
    orbe = load('mc/orbe')
    orbsym = list(load('mc/orbsym'))
    nelecas = load('mc/nelecas')
    h1e_Si = load('h1e_Si')
    h1e_Sr = load('h1e_Sr')
    h1e = load('h1e')
    e_core = load('e_core')
    h2e = load('h2e')
    h2e_Si = load('h2e_Si')
    h2e_Sr = load('h2e_Sr')
    if rank == 0:
        fh5.close()

    if mol.symmetry and len(orbsym) > 0:
        # Reorder irreps to active-first (active, core, virtual) to match
        # the orbital ordering used in the FCIDUMP below, then convert the
        # pyscf irrep IDs to molpro IDs.
        orbsym = orbsym[ncore:ncore+ncas] + orbsym[:ncore] + orbsym[ncore+ncas:]
        orbsym = dmrg_sym.convert_orbsym(mol.groupname, orbsym)
    else:
        orbsym = [1] * (ncore+ncas+nvirt)

    # Even split of the ncore+nvirt non-active orbitals over mpi_size ranks;
    # begin/end index into the concatenated (core, virtual) orbital list.
    partial_size = int(math.floor((ncore+nvirt)/float(mpi_size)))
    num_of_orb_begin = min(rank*partial_size, ncore+nvirt)
    num_of_orb_end = min((rank+1)*partial_size, ncore+nvirt)
    # Adjust the distribution of the non-active orbitals to make sure one
    # processor has at most one more orbital than average (the first
    # "remainder" ranks each absorb one extra orbital).
    if rank < (ncore+nvirt - partial_size*mpi_size):
        num_of_orb_begin += rank
        num_of_orb_end += rank + 1
    else :
        num_of_orb_begin += ncore+nvirt - partial_size*mpi_size
        num_of_orb_end += ncore+nvirt - partial_size*mpi_size

    # Slice the Si blocks along their core-orbital axis and the Sr blocks
    # along their virtual-orbital axis down to this rank's [begin, end)
    # window; blocks that fall entirely outside the window become [].
    if num_of_orb_begin < ncore:
        if num_of_orb_end < ncore:
            # Window lies entirely inside the core orbitals.
            h1e_Si = h1e_Si[:,num_of_orb_begin:num_of_orb_end]
            h2e_Si = h2e_Si[:,num_of_orb_begin:num_of_orb_end,:,:]
            h1e_Sr = []
            h2e_Sr = []
#        elif num_of_orb_end > ncore + nvirt :
#            h1e_Si = h1e_Si[:,num_of_orb_begin:]
#            h2e_Si = h2e_Si[:,num_of_orb_begin:,:,:]
#            #h2e_Sr = []
#            orbsym = orbsym[:ncas] + orbsym[num_of_orb_begin:]
#            norb = ncas + ncore + nvirt - num_of_orb_begin
        else :
            # Window straddles the core/virtual boundary.
            h1e_Si = h1e_Si[:,num_of_orb_begin:]
            h2e_Si = h2e_Si[:,num_of_orb_begin:,:,:]
            h1e_Sr = h1e_Sr[:num_of_orb_end - ncore,:]
            h2e_Sr = h2e_Sr[:num_of_orb_end - ncore,:,:,:]
    elif num_of_orb_begin < ncore + nvirt :
        if num_of_orb_end <= ncore + nvirt:
            # Window lies entirely inside the virtual orbitals.
            h1e_Si = []
            h2e_Si = []
            h1e_Sr = h1e_Sr[num_of_orb_begin - ncore:num_of_orb_end - ncore,:]
            h2e_Sr = h2e_Sr[num_of_orb_begin - ncore:num_of_orb_end - ncore,:,:,:]
#        else :
#            h1e_Si = []
#            h2e_Si = []
#            h1e_Sr = h1e_Sr[num_of_orb_begin - ncore:,:]
#            h2e_Sr = h2e_Sr[num_of_orb_begin - ncore:,:,:,:]
#            orbsym = orbsym[:ncas] + orbsym[ncas+num_of_orb_begin: ]
#            norb = ncas + ncore + nvirt - num_of_orb_begin
    else :
        raise RuntimeError('No job for this processor. It may block MPI.COMM_WORLD.barrier')

    # This rank's FCIDUMP holds the full active space plus its own slice of
    # non-active orbitals; orbsym is trimmed to match.
    norb = ncas + num_of_orb_end - num_of_orb_begin
    orbsym = orbsym[:ncas] + orbsym[ncas + num_of_orb_begin:ncas + num_of_orb_end]
    # Count how many of this rank's orbitals are core vs virtual; the core
    # count sets the index shift for the Sr sections below.
    if num_of_orb_begin >= ncore:
        partial_core = 0
        partial_virt = num_of_orb_end - num_of_orb_begin
    else:
        if num_of_orb_end >= ncore:
            partial_core = ncore -num_of_orb_begin
            partial_virt = num_of_orb_end - ncore
        else:
            partial_core = num_of_orb_end -num_of_orb_begin
            partial_virt = 0
    # Integrals smaller than tol are dropped by the writeh* helpers.
    tol = float(1e-15)

    f = open(os.path.join(nevpt_scratch, 'FCIDUMP'), 'w')
    nelec = nelecas[0] + nelecas[1]
    fcidump.write_head(f,norb, nelec, ms=abs(nelecas[0]-nelecas[1]), orbsym=orbsym)
    #h2e in active space
    writeh2e_sym(h2e,f,tol)
    #h1e in active space
    writeh1e_sym(h1e,f,tol)
    # Diagonal orbital energies of this rank's non-active orbitals, written
    # with 1-based indices offset past the ncas active orbitals.
    orbe =list(orbe[:ncore]) + list(orbe[ncore+ncas:])
    orbe = orbe[num_of_orb_begin:num_of_orb_end]
    for i in range(len(orbe)):
        f.write('% .16f %4d %4d %4d %4d\n'%(orbe[i],i+1+ncas,i+1+ncas,0,0))
    f.write('%.16f %4d %4d %4d %4d\n'%(e_core,0,0,0,0))
    # Each section below is terminated by an all-zero index line; the zero
    # lines are written unconditionally so the reader always sees the same
    # number of section separators.
    if (len(h2e_Sr)):
        writeh2e(h2e_Sr,f,tol, shift0 = ncas + partial_core+1)
    f.write('% 4d %4d %4d %4d %4d\n'%(0,0,0,0,0))
    if (len(h2e_Si)):
        writeh2e(h2e_Si,f,tol, shift1 = ncas+1)
    f.write('% 4d %4d %4d %4d %4d\n'%(0,0,0,0,0))
    if (len(h1e_Sr)):
        writeh1e(h1e_Sr,f,tol, shift0 = ncas + partial_core+1)
    f.write('% 4d %4d %4d %4d %4d\n'%(0,0,0,0,0))
    if (len(h1e_Si)):
        writeh1e(h1e_Si,f,tol, shift1 = ncas+1)
    f.write('% 4d %4d %4d %4d %4d\n'%(0,0,0,0,0))
    f.write('% 4d %4d %4d %4d %4d\n'%(0,0,0,0,0))
    f.close()
    return ncas, partial_core, partial_virt