def load_from_pyscf_chk_mol(chkfile, base='scf'):
    """Collect SCF data for a molecular calculation from a PySCF chkfile.

    Parameters
    ----------
    chkfile : str
        Path to the PySCF checkpoint (HDF5) file.
    base : str
        Group name under which mo_occ / mo_coeff are stored (default 'scf').

    Returns
    -------
    dict
        Keys: 'mol', 'mo_occ', 'hcore', 'X', 'mo_coeff', 'isUHF',
        'df_ints', 'rohf'.
    """
    mol = load_mol(chkfile)
    with h5py.File(chkfile, 'r') as fh5:
        try:
            # Prefer the core Hamiltonian stored in the chkfile.
            hcore = fh5['/scf/hcore'][:]
        except KeyError:
            # Not stored: rebuild hcore = T + V_nuc (+ scalar ECP if present).
            hcore = mol.intor_symmetric('int1e_nuc')
            hcore += mol.intor_symmetric('int1e_kin')
            if len(mol._ecpbas) > 0:
                hcore += mol.intor_symmetric('ECPScalar')
        try:
            # Prefer the stored AO-orthogonalization matrix.
            X = fh5['/scf/orthoAORot'][:]
        except KeyError:
            # Not stored: build it from the AO overlap matrix.
            s1e = mol.intor('int1e_ovlp_sph')
            X = get_ortho_ao_mol(s1e)
        try:
            # 'j3c' presumably holds density-fitting 3-center integrals
            # -- TODO confirm against the writer of this chkfile.
            df_ints = fh5['j3c'][:]
        except KeyError:
            df_ints = None
    mo_occ = numpy.array(lib.chkfile.load(chkfile, base+'/mo_occ'))
    mo_coeff = numpy.array(lib.chkfile.load(chkfile, base+'/mo_coeff'))
    # A 3-dimensional mo_coeff array (spin, AO, MO) signals a UHF calculation.
    uhf = len(mo_coeff.shape) == 3
    # Open shell without separate alpha/beta coefficients => ROHF.
    if mol.nelec[0] != mol.nelec[1] and not uhf:
        rohf = True
    else:
        rohf = False
    scf_data = {'mol': mol, 'mo_occ': mo_occ, 'hcore': hcore, 'X': X,
                'mo_coeff': mo_coeff, 'isUHF': uhf, 'df_ints': df_ints,
                'rohf': rohf}
    return scf_data
def pyscf_from_file(chkfile):
    """Rebuild the mean-field (and, when stored, CASSCF) objects from a chkfile.

    Returns a ``(mf, mc)`` pair; ``mc`` is ``None`` when the chkfile holds no
    'mcscf' group.
    """
    mol = chk.load_mol(chkfile)
    mf = RHF(mol)
    mf.__dict__.update(chk.load(chkfile, 'scf'))

    mc_dict = chk.load(chkfile, 'mcscf')
    if not mc_dict:
        return mf, None

    # Re-attach the pieces stored outside the 'mcscf' group.
    mc_dict['ci'] = chk.load(chkfile, 'ci')
    mc_dict['nelecas'] = tuple(int(n) for n in chk.load(chkfile, 'nelecas'))
    mc = CASSCF(mf, 0, 0)
    mc.__dict__.update(mc_dict)
    return mf, mc
def load_mol(chkfile):
    """Load the mol from the chkfile and broadcast it to every MPI rank.

    Only rank 0 touches the file; the serialized mol is distributed with
    ``mpi_helper.bcast_dict``.  See pyscf.lib.chkfile.
    """
    if mpi_helper.rank == 0:
        serialized = chkutil.load_mol(chkfile).dumps()
    else:
        serialized = None
    mpi_helper.barrier()
    serialized = mpi_helper.bcast_dict(serialized)
    return gto.loads(serialized)
def nevpt_integral_mpi(mc_chkfile, blockfile, dmrginp, dmrgout, scratch):
    """Distribute the NEVPT perturber integrals over MPI ranks and run Block.

    Rank 0 reads the integrals written by ``write_chk`` from *mc_chkfile* and
    broadcasts them; each rank keeps its slice of the non-active (core+virtual)
    orbitals, writes a per-rank FCIDUMP under ``<scratch>/<rank>``, runs the
    *blockfile* executable, and rank 0 finally sums the gathered Sr/Si
    contributions into ``Perturbation_<root>``.

    Fixes vs. previous revision: Python-3 ``print()`` call, ``is None``
    comparison, removal of unused locals and dead commented-out code.
    """
    from mpi4py import MPI
    comm = MPI.COMM_WORLD
    mpi_size = MPI.COMM_WORLD.Get_size()
    rank = comm.Get_rank()

    # Rank 0 loads everything; other ranks receive via broadcast below.
    if rank == 0:
        mol = chkfile.load_mol(mc_chkfile)
        fh5 = h5py.File(mc_chkfile, 'r')
        mo_coeff = fh5['mc/mo'].value
        ncore = fh5['mc/ncore'].value
        ncas = fh5['mc/ncas'].value
        nvirt = fh5['mc/nvirt'].value
        orbe = fh5['mc/orbe'].value
        root = fh5['mc/root'].value
        orbsym = list(fh5['mc/orbsym'].value)
        nelecas = fh5['mc/nelecas'].value
        h1e_Si = fh5['h1e_Si'].value
        h1e_Sr = fh5['h1e_Sr'].value
        h1e = fh5['h1e'].value
        e_core = fh5['e_core'].value
        h2e = fh5['h2e'].value
        h2e_Si = fh5['h2e_Si'].value
        h2e_Sr = fh5['h2e_Sr'].value
        fh5.close()
        headnode = MPI.Get_processor_name()
    else:
        mol = None
        mo_coeff = None
        ncore = None
        ncas = None
        nvirt = None
        orbe = None
        root = None
        orbsym = None
        nelecas = None
        h1e_Si = None
        h1e_Sr = None
        h1e = None
        e_core = None
        h2e = None
        h2e_Si = None
        h2e_Sr = None
        headnode = None
    comm.barrier()
    mol = comm.bcast(mol, root=0)
    mo_coeff = comm.bcast(mo_coeff, root=0)
    ncas = comm.bcast(ncas, root=0)
    ncore = comm.bcast(ncore, root=0)
    nvirt = comm.bcast(nvirt, root=0)
    root = comm.bcast(root, root=0)
    orbsym = comm.bcast(orbsym, root=0)
    nelecas = comm.bcast(nelecas, root=0)
    orbe = comm.bcast(orbe, root=0)
    h1e_Si = comm.bcast(h1e_Si, root=0)
    h1e_Sr = comm.bcast(h1e_Sr, root=0)
    h1e = comm.bcast(h1e, root=0)
    h2e = comm.bcast(h2e, root=0)
    h2e_Si = comm.bcast(h2e_Si, root=0)
    h2e_Sr = comm.bcast(h2e_Sr, root=0)
    headnode = comm.bcast(headnode, root=0)
    e_core = comm.bcast(e_core, root=0)

    nelec = nelecas[0] + nelecas[1]
    # Reorder symmetry labels to (active, core, virtual) and convert to the
    # DMRG (Block) convention; without symmetry every orbital is labeled 1.
    if mol.symmetry and len(orbsym):
        orbsym = orbsym[ncore:ncore+ncas] + orbsym[:ncore] + orbsym[ncore+ncas:]
        orbsym = dmrg_sym.convert_orbsym(mol.groupname, orbsym)
    else:
        orbsym = [1] * (ncore+ncas+nvirt)

    # Slice of the non-active (core+virtual) orbital range owned by this rank.
    partial_size = int(math.floor((ncore+nvirt)/float(mpi_size)))
    num_of_orb_begin = min(rank*partial_size, ncore+nvirt)
    num_of_orb_end = min((rank+1)*partial_size, ncore+nvirt)
    # Adjust the distribution of the non-active orbitals to make sure one
    # processor has at most one more orbital than average.
    if rank < (ncore+nvirt - partial_size*mpi_size):
        num_of_orb_begin += rank
        num_of_orb_end += rank + 1
    else:
        num_of_orb_begin += ncore+nvirt - partial_size*mpi_size
        num_of_orb_end += ncore+nvirt - partial_size*mpi_size

    # Keep only this rank's slice of the Si (core) / Sr (virtual) integrals.
    if num_of_orb_begin < ncore:
        if num_of_orb_end < ncore:
            h1e_Si = h1e_Si[:, num_of_orb_begin:num_of_orb_end]
            h2e_Si = h2e_Si[:, num_of_orb_begin:num_of_orb_end, :, :]
            h1e_Sr = []
            h2e_Sr = []
        else:
            h1e_Si = h1e_Si[:, num_of_orb_begin:]
            h2e_Si = h2e_Si[:, num_of_orb_begin:, :, :]
            h1e_Sr = h1e_Sr[:num_of_orb_end - ncore, :]
            h2e_Sr = h2e_Sr[:num_of_orb_end - ncore, :, :, :]
    elif num_of_orb_begin < ncore + nvirt:
        if num_of_orb_end <= ncore + nvirt:
            h1e_Si = []
            h2e_Si = []
            h1e_Sr = h1e_Sr[num_of_orb_begin - ncore:num_of_orb_end - ncore, :]
            h2e_Sr = h2e_Sr[num_of_orb_begin - ncore:num_of_orb_end - ncore, :, :, :]
    else:
        raise RuntimeError('No job for this processor. '
                           'It may block MPI.COMM_WORLD.barrier')

    norb = ncas + num_of_orb_end - num_of_orb_begin
    orbsym = orbsym[:ncas] + orbsym[ncas + num_of_orb_begin:ncas + num_of_orb_end]
    if num_of_orb_begin >= ncore:
        partial_core = 0
        partial_virt = num_of_orb_end - num_of_orb_begin
    else:
        if num_of_orb_end >= ncore:
            partial_core = ncore - num_of_orb_begin
            partial_virt = num_of_orb_end - ncore
        else:
            partial_core = num_of_orb_end - num_of_orb_begin
            partial_virt = 0

    # Per-rank scratch directory, with the node0 subdir Block expects.
    newscratch = os.path.join(os.path.abspath(scratch), str(rank))
    if not os.path.exists('%s' % newscratch):
        os.makedirs('%s' % newscratch)
        os.makedirs('%s/node0' % newscratch)
    nevptinp = os.path.join(newscratch, os.path.basename(dmrginp))
    subprocess.check_call('cp %s %s' % (dmrginp, nevptinp), shell=True)
    with open(nevptinp, 'a') as f:
        f.write('restart_mps_nevpt %d %d %d \n' % (ncas, partial_core, partial_virt))
    tol = 1e-15

    # TODO: use MPI rather than scp/cp to copy the restart files, to make the
    # code robust.
    # Rank 0 lists the Block restart files; contents are broadcast to all.
    if rank == 0:
        filenames = []
        for fn in os.listdir('%s/node0' % scratch):
            if (fn == 'dmrg.e' or fn == 'statefile.0.tmp' or
                    fn == 'RestartReorder.dat' or fn.startswith('wave') or
                    fn.startswith('Rotation')):
                filenames.append(fn)
    else:
        filenames = None
    filenames = comm.bcast(filenames, root=0)
    for i in range(len(filenames)):
        if rank == 0:
            with open('%s/node0/%s' % (scratch, filenames[i]), 'rb') as f:
                data = f.read()
        else:
            data = None
        data = comm.bcast(data, root=0)
        if data is None:
            print('empty file')
        with open('%s/node0/%s' % (newscratch, filenames[i]), 'wb') as f:
            f.write(data)

    # Write this rank's FCIDUMP: active-space h2e/h1e, then orbital energies
    # of the owned core/virtual slice, then the Sr/Si perturber blocks.
    f = open('%s/FCIDUMP' % newscratch, 'w')
    pyscf.tools.fcidump.write_head(f, norb, nelec,
                                   ms=abs(nelecas[0]-nelecas[1]),
                                   orbsym=orbsym)
    writeh2e_sym(h2e, f, tol)
    writeh1e_sym(h1e, f, tol)
    orbe = list(orbe[:ncore]) + list(orbe[ncore+ncas:])
    orbe = orbe[num_of_orb_begin:num_of_orb_end]
    for i in range(len(orbe)):
        f.write('% .16f %4d %4d %4d %4d\n' % (orbe[i], i+1+ncas, i+1+ncas, 0, 0))
    f.write('%.16f %4d %4d %4d %4d\n' % (e_core, 0, 0, 0, 0))
    if (len(h2e_Sr)):
        writeh2e(h2e_Sr, f, tol, shift0=ncas + partial_core+1)
    f.write('% 4d %4d %4d %4d %4d\n' % (0, 0, 0, 0, 0))
    if (len(h2e_Si)):
        writeh2e(h2e_Si, f, tol, shift1=ncas+1)
    f.write('% 4d %4d %4d %4d %4d\n' % (0, 0, 0, 0, 0))
    if (len(h1e_Sr)):
        writeh1e(h1e_Sr, f, tol, shift0=ncas + partial_core+1)
    f.write('% 4d %4d %4d %4d %4d\n' % (0, 0, 0, 0, 0))
    if (len(h1e_Si)):
        writeh1e(h1e_Si, f, tol, shift1=ncas+1)
    f.write('% 4d %4d %4d %4d %4d\n' % (0, 0, 0, 0, 0))
    f.write('% 4d %4d %4d %4d %4d\n' % (0, 0, 0, 0, 0))
    f.close()

    current_path = os.getcwd()
    os.chdir('%s' % newscratch)
    # Strip MPI/SLURM variables to prevent Block running in MPI mode.
    envnew = {}
    for k in os.environ:
        if 'MPI' not in k and 'SLURM' not in k:
            envnew[k] = os.environ[k]
    p = subprocess.Popen(['%s %s > %s' % (blockfile, nevptinp, dmrgout)],
                         env=envnew, shell=True)
    p.wait()

    # Each rank reads its partial Sr (Va_*) and Si (Vi_*) results.
    f = open('node0/Va_%d' % root, 'r')
    Vr_energy = float(f.readline())
    Vr_norm = float(f.readline())
    f.close()
    f = open('node0/Vi_%d' % root, 'r')
    Vi_energy = float(f.readline())
    Vi_norm = float(f.readline())
    f.close()
    comm.barrier()
    Vi_total_e = comm.gather(Vi_energy, root=0)
    Vi_total_norm = comm.gather(Vi_norm, root=0)
    Vr_total_e = comm.gather(Vr_energy, root=0)
    Vr_total_norm = comm.gather(Vr_norm, root=0)
    os.chdir('%s' % current_path)
    if rank == 0:
        fh5 = h5py.File('Perturbation_%d' % root, 'w')
        fh5['Vi/energy'] = sum(Vi_total_e)
        fh5['Vi/norm'] = sum(Vi_total_norm)
        fh5['Vr/energy'] = sum(Vr_total_e)
        fh5['Vr/norm'] = sum(Vr_total_norm)
        fh5.close()
def DMRG_COMPRESS_NEVPT(mc, maxM=500, root=0, nevptsolver=None, tol=1e-7):
    """Run the compressed-perturber DMRG-NEVPT2 calculation for state *root*.

    *mc* is either a CASSCF-like object or the path of a chkfile previously
    written by ``write_chk``.  The heavy work is delegated to ``nevpt_mpi.py``
    through a subprocess; the Sr/Si subspace energies are read back from the
    ``Perturbation_<root>`` HDF5 file it produces and logged.
    """
    if (isinstance(mc, str)):
        # mc is a chkfile path: read the dimensions stored by write_chk.
        mol = chkfile.load_mol(mc)
        fh5 = h5py.File(mc, 'r')
        ncas = fh5['mc/ncas'].value
        ncore = fh5['mc/ncore'].value
        nvirt = fh5['mc/nvirt'].value
        nelecas = fh5['mc/nelecas'].value
        nroots = fh5['mc/nroots'].value
        wfnsym = fh5['mc/wfnsym'].value
        fh5.close()
        mc_chk = mc
    else:
        # mc is a live CASSCF object: take the dimensions from it and dump
        # the perturbation integrals to a fresh chkfile.
        mol = mc.mol
        ncas = mc.ncas
        ncore = mc.ncore
        nvirt = mc.mo_coeff.shape[1] - mc.ncas-mc.ncore
        nelecas = mc.nelecas
        nroots = mc.fcisolver.nroots
        wfnsym = mc.fcisolver.wfnsym
        mc_chk = 'nevpt_perturb_integral'
        write_chk(mc, root, mc_chk)
    if nevptsolver is None:
        nevptsolver = default_nevpt_schedule(mol, maxM, tol)
        nevptsolver.wfnsym = wfnsym
        nevptsolver.block_extra_keyword = mc.fcisolver.block_extra_keyword
    nevptsolver.nroots = nroots
    from pyscf.dmrgscf import settings
    nevptsolver.executable = settings.BLOCKEXE_COMPRESS_NEVPT
    # Temporarily blank scratchDirectory so the generated conf file carries
    # no 'prefix' line; restore it afterwards.
    scratch = nevptsolver.scratchDirectory
    nevptsolver.scratchDirectory = ''
    dmrgci.writeDMRGConfFile(nevptsolver, nelecas, False,
                             with_2pdm=False,
                             extraline=['fullrestart',
                                        'nevpt_state_num %d' % root])
    nevptsolver.scratchDirectory = scratch
    if nevptsolver.verbose >= logger.DEBUG1:
        inFile = os.path.join(nevptsolver.runtimeDir, nevptsolver.configFile)
        logger.debug1(nevptsolver, 'Block Input conf')
        logger.debug1(nevptsolver, open(inFile, 'r').read())
    t0 = (time.clock(), time.time())
    # Launch nevpt_mpi.py (which calls nevpt_integral_mpi) via mpiprefix.
    cmd = ' '.join((nevptsolver.mpiprefix,
                    '%s/nevpt_mpi.py' % os.path.dirname(os.path.realpath(__file__)),
                    mc_chk,
                    nevptsolver.executable,
                    os.path.join(nevptsolver.runtimeDir, nevptsolver.configFile),
                    nevptsolver.outputFile,
                    nevptsolver.scratchDirectory))
    logger.debug(nevptsolver, 'DMRG_COMPRESS_NEVPT cmd %s', cmd)
    try:
        output = subprocess.check_call(cmd, shell=True)
    except subprocess.CalledProcessError as err:
        logger.error(nevptsolver, cmd)
        raise err
    if nevptsolver.verbose >= logger.DEBUG1:
        logger.debug1(nevptsolver,
                      open(os.path.join(nevptsolver.scratchDirectory,
                                        '0/dmrg.out')).read())
    # Collect the Sr/Si subspace results written by the subprocess.
    fh5 = h5py.File('Perturbation_%d' % root, 'r')
    Vi_e = fh5['Vi/energy'].value
    Vr_e = fh5['Vr/energy'].value
    fh5.close()
    logger.note(nevptsolver, 'Nevpt Energy:')
    logger.note(nevptsolver, 'Sr Subspace: E = %.14f' % (Vr_e))
    logger.note(nevptsolver, 'Si Subspace: E = %.14f' % (Vi_e))
    logger.timer(nevptsolver, 'MPS NEVPT calculation time', *t0)
from pyscf.lib import chkfile # # Check args # if len(sys.argv) != 2: raise AssertionError("Incorrect # of args\n" "Use like: python pt2.py 1\n" "Where 1 is the root you want to target.") target_state = int(sys.argv[-1]) # # Load MF orbitals # chkname = "_chk/pp_dz_b3lyp.chk" mol = chkfile.load_mol(chkname) mol.max_memory = int(1e5) # memory in MB 1e6 -> 1 TB mf = dft.RKS(mol) mf.__dict__.update(chkfile.load(chkname, "scf")) # # Load SA-MCSCF # nelecas, ncas = (4, 4) n_states = 3 weights = np.ones(n_states) / n_states mc0 = mcscf.CASSCF(mf, ncas, nelecas).state_average_(weights) mc0.fix_spin(ss=0) mc0.chkfile = "_chk/pp_dz_cas_4e_4o.chk" mc0.__dict__.update(chkfile.load(mc0.chkfile, "mcscf"))
def load_scf(chkfile):
    """Return the (mol, scf-data dict) pair stored in *chkfile*."""
    mol = load_mol(chkfile)
    scf_rec = load(chkfile, 'scf')
    return mol, scf_rec
def load_mcscf(chkfile):
    """Return the (mol, mcscf-data dict) pair stored in *chkfile*."""
    mol = load_mol(chkfile)
    mcscf_rec = load(chkfile, 'mcscf')
    return mol, mcscf_rec
def DMRG_COMPRESS_NEVPT(mc, maxM=500, root=0, nevptsolver=None, tol=1e-7,
                        nevpt_integral=None):
    """Run the compressed-perturber DMRG-NEVPT2 calculation for state *root*.

    If *nevpt_integral* is a path to an existing HDF5 integral file (written
    by ``write_chk``), the dimensions are read from it; otherwise they are
    taken from the live *mc* object and the integral file is generated in the
    NEVPT scratch directory.  ``nevpt_mpi.py`` is then launched as a
    subprocess and the Sr/Si energies are read back from the
    ``Perturbation_<root>`` file it writes.

    Returns the path of that perturbation file.
    """
    if isinstance(nevpt_integral, str) and h5py.is_hdf5(nevpt_integral):
        nevpt_integral_file = os.path.abspath(nevpt_integral)
        mol = chkfile.load_mol(nevpt_integral_file)
        fh5 = h5py.File(nevpt_integral_file, 'r')
        ncas = fh5['mc/ncas'][()]
        ncore = fh5['mc/ncore'][()]
        nvirt = fh5['mc/nvirt'][()]
        nelecas = fh5['mc/nelecas'][()]
        nroots = fh5['mc/nroots'][()]
        wfnsym = fh5['mc/wfnsym'][()]
        fh5.close()
    else:
        mol = mc.mol
        ncas = mc.ncas
        ncore = mc.ncore
        nvirt = mc.mo_coeff.shape[1] - mc.ncas - mc.ncore
        nelecas = mc.nelecas
        nroots = mc.fcisolver.nroots
        wfnsym = mc.fcisolver.wfnsym
        # Deferred: generated below, once the scratch directory is known.
        nevpt_integral_file = None
    if nevptsolver is None:
        nevptsolver = default_nevpt_schedule(mc.fcisolver, maxM, tol)
        #nevptsolver.__dict__.update(mc.fcisolver.__dict__)
        nevptsolver.wfnsym = wfnsym
        nevptsolver.block_extra_keyword = mc.fcisolver.block_extra_keyword
    nevptsolver.nroots = nroots
    nevptsolver.executable = settings.BLOCKEXE_COMPRESS_NEVPT
    # Sharing one executable between the DMRG solver and the NEVPT solver is
    # dangerous when both are MPI builds -- warn, do not abort.
    if nevptsolver.executable == getattr(mc.fcisolver, 'executable', None):
        logger.warn(
            mc, 'DMRG executable file for nevptsolver is the same '
            'to the executable file for DMRG solver. If they are '
            'both compiled by MPI compilers, they may cause error or '
            'random results in DMRG-NEVPT calculation.')
    nevpt_scratch = os.path.abspath(nevptsolver.scratchDirectory)
    dmrg_scratch = os.path.abspath(mc.fcisolver.scratchDirectory)

    # Integrals are not given by the kwarg nevpt_integral
    if nevpt_integral_file is None:
        nevpt_integral_file = os.path.join(nevpt_scratch,
                                           'nevpt_perturb_integral')
        write_chk(mc, root, nevpt_integral_file)

    conf = dmrgci.writeDMRGConfFile(
        nevptsolver, nelecas, False, with_2pdm=False,
        extraline=['fullrestart', 'nevpt_state_num %d' % root])
    with open(conf, 'r') as f:
        block_conf = f.readlines()
    # Drop the 'prefix' line so the subprocess chooses its own scratch.
    block_conf = [l for l in block_conf if 'prefix' not in l]
    block_conf = ''.join(block_conf)
    # Embed the Block conf in the integral file so the subprocess is
    # self-contained.
    with h5py.File(nevpt_integral_file, 'a') as fh5:
        if 'dmrg.conf' in fh5:
            del (fh5['dmrg.conf'])
        fh5['dmrg.conf'] = block_conf
    if nevptsolver.verbose >= logger.DEBUG1:
        logger.debug1(nevptsolver, 'Block Input conf')
        logger.debug1(nevptsolver, block_conf)

    t0 = (time.clock(), time.time())
    # function nevpt_integral_mpi is called in this cmd
    cmd = ' '.join(
        (nevptsolver.mpiprefix,
         os.path.realpath(os.path.join(__file__, '..', 'nevpt_mpi.py')),
         nevpt_integral_file,
         nevptsolver.executable,
         dmrg_scratch, nevpt_scratch))
    logger.debug(nevptsolver, 'DMRG_COMPRESS_NEVPT cmd %s', cmd)
    try:
        output = subprocess.check_call(cmd, shell=True)
    except subprocess.CalledProcessError as err:
        logger.error(nevptsolver, cmd)
        raise err

    if nevptsolver.verbose >= logger.DEBUG1:
        logger.debug1(
            nevptsolver,
            open(os.path.join(nevpt_scratch, '0', 'dmrg.out')).read())

    # Collect the Sr/Si subspace results written by the subprocess.
    perturb_file = os.path.join(nevpt_scratch, '0', 'Perturbation_%d' % root)
    fh5 = h5py.File(perturb_file, 'r')
    Vi_e = fh5['Vi/energy'][()]
    Vr_e = fh5['Vr/energy'][()]
    fh5.close()
    logger.note(nevptsolver, 'Nevpt Energy:')
    logger.note(nevptsolver, 'Sr Subspace: E = %.14f' % (Vr_e))
    logger.note(nevptsolver, 'Si Subspace: E = %.14f' % (Vi_e))
    logger.timer(nevptsolver, 'MPS NEVPT calculation time', *t0)
    return perturb_file
# Demonstrate round-tripping SCF / CISD results through the chkfile.
mf.run()
print('E(HF) = %s' % mf.e_tot)
scf_result_dic = chkfile.load('example.chk', 'scf')
mf_new = scf.RHF(mol)
mf_new.__dict__.update(scf_result_dic)
# Fix: report the energy of the object restored from the chkfile (mf_new),
# not the original mf, so the label matches the value's provenance.
print('E(HF) from chkfile = %s' % mf_new.e_tot)

myci = ci.CISD(mf).run()
myci.dump_chk()
print('E(CISD) = %s' % myci.e_tot)
cisd_result_dic = chkfile.load('example.chk', 'cisd')
myci_new = ci.CISD(mf_new)
myci_new.__dict__.update(cisd_result_dic)
print('E(CISD) from chkfile = %s' % myci_new.e_tot)

# The serialized Mole round-trips geometry and charges exactly.
mol_new = chkfile.load_mol('example.chk')
print(numpy.allclose(mol.atom_coords(), mol_new.atom_coords()))
print(numpy.allclose(mol.atom_charges(), mol_new.atom_charges()))

# Peek inside: a chkfile is plain HDF5 with one group per method.
with h5py.File('example.chk') as f:
    print('\nCheckpoint file is a HDF5 file. data are stored in file/directory structure.')
    print('/', f.keys())
    print('/scf', f['scf'].keys())
    print('/scf/mo_occ', f['scf/mo_occ'].value)
    print('/cisd', f['cisd'].keys())
    print('\nMolecular object (mol) is seriealized to json format and stored')
    print('/mol: %s ...' % f['mol'].value[:20])
def DMRG_COMPRESS_NEVPT(mc, maxM=500, root=0, nevptsolver=None, tol=1e-7,
                        nevpt_integral=None):
    """Run the compressed-perturber DMRG-NEVPT2 calculation for state *root*.

    If *nevpt_integral* names an existing integral chkfile, dimensions are
    read from it; otherwise they come from the live *mc* object and the
    integral file is written here.  ``nevpt_mpi.py`` is launched as a
    subprocess; the Sr/Si energies are read back from ``Perturbation_<root>``.

    Fixes vs. previous revision: (1) ``nevpt_integral_file`` is now also set
    when *nevpt_integral* is supplied (previously a NameError);
    (2) the conf lines are joined with ``''`` -- ``readlines()`` keeps the
    trailing newlines, so ``'\\n'.join`` double-spaced the stored conf;
    (3) the integral file is opened explicitly in append mode ('a'), which
    the write below requires.
    """
    if nevpt_integral:
        nevpt_integral_file = nevpt_integral
        mol = chkfile.load_mol(nevpt_integral)
        fh5 = h5py.File(nevpt_integral, 'r')
        ncas = fh5['mc/ncas'].value
        ncore = fh5['mc/ncore'].value
        nvirt = fh5['mc/nvirt'].value
        nelecas = fh5['mc/nelecas'].value
        nroots = fh5['mc/nroots'].value
        wfnsym = fh5['mc/wfnsym'].value
        fh5.close()
    else:
        mol = mc.mol
        ncas = mc.ncas
        ncore = mc.ncore
        nvirt = mc.mo_coeff.shape[1] - mc.ncas - mc.ncore
        nelecas = mc.nelecas
        nroots = mc.fcisolver.nroots
        wfnsym = mc.fcisolver.wfnsym
        nevpt_integral_file = 'nevpt_perturb_integral'
        write_chk(mc, root, nevpt_integral_file)
    if nevptsolver is None:
        nevptsolver = default_nevpt_schedule(mol, maxM, tol)
        nevptsolver.__dict__.update(mc.fcisolver.__dict__)
        nevptsolver.wfnsym = wfnsym
        nevptsolver.block_extra_keyword = mc.fcisolver.block_extra_keyword
    nevptsolver.nroots = nroots
    nevptsolver.executable = settings.BLOCKEXE_COMPRESS_NEVPT

    conf = dmrgci.writeDMRGConfFile(
        nevptsolver, nelecas, False, with_2pdm=False,
        extraline=['fullrestart', 'nevpt_state_num %d' % root])
    with open(conf, 'r') as f:
        block_conf = f.readlines()
    # Drop the 'prefix' line so the subprocess chooses its own scratch.
    block_conf = [l for l in block_conf if 'prefix' not in l]
    block_conf = ''.join(block_conf)
    # Embed the Block conf in the integral file for the subprocess.
    with h5py.File(nevpt_integral_file, 'a') as fh5:
        if 'dmrg.conf' in fh5:
            del (fh5['dmrg.conf'])
        fh5['dmrg.conf'] = block_conf
    if nevptsolver.verbose >= logger.DEBUG1:
        logger.debug1(nevptsolver, 'Block Input conf')
        logger.debug1(nevptsolver, block_conf)

    t0 = (time.clock(), time.time())
    cmd = ' '.join(
        (nevptsolver.mpiprefix,
         os.path.realpath(os.path.join(__file__, '..', 'nevpt_mpi.py')),
         nevpt_integral_file,
         nevptsolver.executable,
         mc.fcisolver.scratchDirectory,
         nevptsolver.scratchDirectory))
    logger.debug(nevptsolver, 'DMRG_COMPRESS_NEVPT cmd %s', cmd)
    try:
        output = subprocess.check_call(cmd, shell=True)
    except subprocess.CalledProcessError as err:
        logger.error(nevptsolver, cmd)
        raise err

    if nevptsolver.verbose >= logger.DEBUG1:
        logger.debug1(
            nevptsolver,
            open(os.path.join(nevptsolver.scratchDirectory,
                              '0/dmrg.out')).read())

    # Collect the Sr/Si subspace results written by the subprocess.
    fh5 = h5py.File('Perturbation_%d' % root, 'r')
    Vi_e = fh5['Vi/energy'].value
    Vr_e = fh5['Vr/energy'].value
    fh5.close()
    logger.note(nevptsolver, 'Nevpt Energy:')
    logger.note(nevptsolver, 'Sr Subspace: E = %.14f' % (Vr_e))
    logger.note(nevptsolver, 'Si Subspace: E = %.14f' % (Vi_e))
    logger.timer(nevptsolver, 'MPS NEVPT calculation time', *t0)
def nevpt_integral_mpi(mc_chkfile, blockfile, dmrginp, dmrgout, scratch):
    """Distribute the NEVPT perturber integrals over MPI ranks and run Block.

    Rank 0 reads the integrals written by ``write_chk`` from *mc_chkfile* and
    broadcasts them; each rank keeps its slice of the non-active (core+virtual)
    orbitals, writes a per-rank FCIDUMP under ``<scratch>/<rank>``, runs the
    *blockfile* executable, and rank 0 finally sums the gathered Sr/Si
    contributions into ``Perturbation_<root>``.

    Fixes vs. previous revision: Python-3 ``print()`` call, ``is None``
    comparison, removal of unused locals and dead commented-out code.
    """
    from mpi4py import MPI
    comm = MPI.COMM_WORLD
    mpi_size = MPI.COMM_WORLD.Get_size()
    rank = comm.Get_rank()

    # Rank 0 loads everything; other ranks receive via broadcast below.
    if rank == 0:
        mol = chkfile.load_mol(mc_chkfile)
        fh5 = h5py.File(mc_chkfile, 'r')
        mo_coeff = fh5['mc/mo'].value
        ncore = fh5['mc/ncore'].value
        ncas = fh5['mc/ncas'].value
        nvirt = fh5['mc/nvirt'].value
        orbe = fh5['mc/orbe'].value
        root = fh5['mc/root'].value
        orbsym = list(fh5['mc/orbsym'].value)
        nelecas = fh5['mc/nelecas'].value
        h1e_Si = fh5['h1e_Si'].value
        h1e_Sr = fh5['h1e_Sr'].value
        h1e = fh5['h1e'].value
        e_core = fh5['e_core'].value
        h2e = fh5['h2e'].value
        h2e_Si = fh5['h2e_Si'].value
        h2e_Sr = fh5['h2e_Sr'].value
        fh5.close()
        headnode = MPI.Get_processor_name()
    else:
        mol = None
        mo_coeff = None
        ncore = None
        ncas = None
        nvirt = None
        orbe = None
        root = None
        orbsym = None
        nelecas = None
        h1e_Si = None
        h1e_Sr = None
        h1e = None
        e_core = None
        h2e = None
        h2e_Si = None
        h2e_Sr = None
        headnode = None
    comm.barrier()
    mol = comm.bcast(mol, root=0)
    mo_coeff = comm.bcast(mo_coeff, root=0)
    ncas = comm.bcast(ncas, root=0)
    ncore = comm.bcast(ncore, root=0)
    nvirt = comm.bcast(nvirt, root=0)
    root = comm.bcast(root, root=0)
    orbsym = comm.bcast(orbsym, root=0)
    nelecas = comm.bcast(nelecas, root=0)
    orbe = comm.bcast(orbe, root=0)
    h1e_Si = comm.bcast(h1e_Si, root=0)
    h1e_Sr = comm.bcast(h1e_Sr, root=0)
    h1e = comm.bcast(h1e, root=0)
    h2e = comm.bcast(h2e, root=0)
    h2e_Si = comm.bcast(h2e_Si, root=0)
    h2e_Sr = comm.bcast(h2e_Sr, root=0)
    headnode = comm.bcast(headnode, root=0)
    e_core = comm.bcast(e_core, root=0)

    nelec = nelecas[0] + nelecas[1]
    # Reorder symmetry labels to (active, core, virtual) and convert to the
    # DMRG (Block) convention; without symmetry every orbital is labeled 1.
    if mol.symmetry and len(orbsym):
        orbsym = orbsym[ncore:ncore+ncas] + orbsym[:ncore] + orbsym[ncore+ncas:]
        orbsym = dmrg_sym.convert_orbsym(mol.groupname, orbsym)
    else:
        orbsym = [1] * (ncore+ncas+nvirt)

    # Slice of the non-active (core+virtual) orbital range owned by this rank.
    partial_size = int(math.floor((ncore+nvirt)/float(mpi_size)))
    num_of_orb_begin = min(rank*partial_size, ncore+nvirt)
    num_of_orb_end = min((rank+1)*partial_size, ncore+nvirt)
    # Adjust the distribution of the non-active orbitals to make sure one
    # processor has at most one more orbital than average.
    if rank < (ncore+nvirt - partial_size*mpi_size):
        num_of_orb_begin += rank
        num_of_orb_end += rank + 1
    else:
        num_of_orb_begin += ncore+nvirt - partial_size*mpi_size
        num_of_orb_end += ncore+nvirt - partial_size*mpi_size

    # Keep only this rank's slice of the Si (core) / Sr (virtual) integrals.
    if num_of_orb_begin < ncore:
        if num_of_orb_end < ncore:
            h1e_Si = h1e_Si[:, num_of_orb_begin:num_of_orb_end]
            h2e_Si = h2e_Si[:, num_of_orb_begin:num_of_orb_end, :, :]
            h1e_Sr = []
            h2e_Sr = []
        else:
            h1e_Si = h1e_Si[:, num_of_orb_begin:]
            h2e_Si = h2e_Si[:, num_of_orb_begin:, :, :]
            h1e_Sr = h1e_Sr[:num_of_orb_end - ncore, :]
            h2e_Sr = h2e_Sr[:num_of_orb_end - ncore, :, :, :]
    elif num_of_orb_begin < ncore + nvirt:
        if num_of_orb_end <= ncore + nvirt:
            h1e_Si = []
            h2e_Si = []
            h1e_Sr = h1e_Sr[num_of_orb_begin - ncore:num_of_orb_end - ncore, :]
            h2e_Sr = h2e_Sr[num_of_orb_begin - ncore:num_of_orb_end - ncore, :, :, :]
    else:
        raise RuntimeError('No job for this processor. '
                           'It may block MPI.COMM_WORLD.barrier')

    norb = ncas + num_of_orb_end - num_of_orb_begin
    orbsym = orbsym[:ncas] + orbsym[ncas + num_of_orb_begin:ncas + num_of_orb_end]
    if num_of_orb_begin >= ncore:
        partial_core = 0
        partial_virt = num_of_orb_end - num_of_orb_begin
    else:
        if num_of_orb_end >= ncore:
            partial_core = ncore - num_of_orb_begin
            partial_virt = num_of_orb_end - ncore
        else:
            partial_core = num_of_orb_end - num_of_orb_begin
            partial_virt = 0

    # Per-rank scratch directory, with the node0 subdir Block expects.
    newscratch = os.path.join(scratch, str(rank))
    if not os.path.exists('%s' % newscratch):
        os.makedirs('%s' % newscratch)
        os.makedirs('%s/node0' % newscratch)
    subprocess.check_call('cp %s %s/%s' % (dmrginp, newscratch, dmrginp),
                          shell=True)
    with open('%s/%s' % (newscratch, dmrginp), 'a') as f:
        f.write('restart_mps_nevpt %d %d %d \n' % (ncas, partial_core, partial_virt))
    tol = 1e-15

    # TODO: use MPI rather than scp/cp to copy the restart files, to make the
    # code robust.
    # Rank 0 lists the Block restart files; contents are broadcast to all.
    if rank == 0:
        filenames = []
        for fn in os.listdir('%s/node0' % scratch):
            if (fn == 'dmrg.e' or fn == 'statefile.0.tmp' or
                    fn == 'RestartReorder.dat' or fn.startswith('wave') or
                    fn.startswith('Rotation')):
                filenames.append(fn)
    else:
        filenames = None
    filenames = comm.bcast(filenames, root=0)
    for i in range(len(filenames)):
        if rank == 0:
            with open('%s/node0/%s' % (scratch, filenames[i]), 'rb') as f:
                data = f.read()
        else:
            data = None
        data = comm.bcast(data, root=0)
        if data is None:
            print('empty file')
        with open('%s/node0/%s' % (newscratch, filenames[i]), 'wb') as f:
            f.write(data)

    # Write this rank's FCIDUMP: active-space h2e/h1e, then orbital energies
    # of the owned core/virtual slice, then the Sr/Si perturber blocks.
    f = open('%s/FCIDUMP' % newscratch, 'w')
    pyscf.tools.fcidump.write_head(f, norb, nelec,
                                   ms=abs(nelecas[0]-nelecas[1]),
                                   orbsym=orbsym)
    writeh2e_sym(h2e, f, tol)
    writeh1e_sym(h1e, f, tol)
    orbe = list(orbe[:ncore]) + list(orbe[ncore+ncas:])
    orbe = orbe[num_of_orb_begin:num_of_orb_end]
    for i in range(len(orbe)):
        f.write('% .16f %4d %4d %4d %4d\n' % (orbe[i], i+1+ncas, i+1+ncas, 0, 0))
    f.write('%.16f %4d %4d %4d %4d\n' % (e_core, 0, 0, 0, 0))
    if (len(h2e_Sr)):
        writeh2e(h2e_Sr, f, tol, shift0=ncas + partial_core+1)
    f.write('% 4d %4d %4d %4d %4d\n' % (0, 0, 0, 0, 0))
    if (len(h2e_Si)):
        writeh2e(h2e_Si, f, tol, shift1=ncas+1)
    f.write('% 4d %4d %4d %4d %4d\n' % (0, 0, 0, 0, 0))
    if (len(h1e_Sr)):
        writeh1e(h1e_Sr, f, tol, shift0=ncas + partial_core+1)
    f.write('% 4d %4d %4d %4d %4d\n' % (0, 0, 0, 0, 0))
    if (len(h1e_Si)):
        writeh1e(h1e_Si, f, tol, shift1=ncas+1)
    f.write('% 4d %4d %4d %4d %4d\n' % (0, 0, 0, 0, 0))
    f.write('% 4d %4d %4d %4d %4d\n' % (0, 0, 0, 0, 0))
    f.close()

    current_path = os.getcwd()
    os.chdir('%s' % newscratch)
    # Strip MPI/SLURM variables to prevent Block running in MPI mode.
    envnew = {}
    for k in os.environ:
        if 'MPI' not in k and 'SLURM' not in k:
            envnew[k] = os.environ[k]
    p = subprocess.Popen(['%s %s > %s' % (blockfile, dmrginp, dmrgout)],
                         env=envnew, shell=True)
    p.wait()

    # Each rank reads its partial Sr (Va_*) and Si (Vi_*) results.
    f = open('node0/Va_%d' % root, 'r')
    Vr_energy = float(f.readline())
    Vr_norm = float(f.readline())
    f.close()
    f = open('node0/Vi_%d' % root, 'r')
    Vi_energy = float(f.readline())
    Vi_norm = float(f.readline())
    f.close()
    comm.barrier()
    Vi_total_e = comm.gather(Vi_energy, root=0)
    Vi_total_norm = comm.gather(Vi_norm, root=0)
    Vr_total_e = comm.gather(Vr_energy, root=0)
    Vr_total_norm = comm.gather(Vr_norm, root=0)
    os.chdir('%s' % current_path)
    if rank == 0:
        fh5 = h5py.File('Perturbation_%d' % root, 'w')
        fh5['Vi/energy'] = sum(Vi_total_e)
        fh5['Vi/norm'] = sum(Vi_total_norm)
        fh5['Vr/energy'] = sum(Vr_total_e)
        fh5['Vr/norm'] = sum(Vr_total_norm)
        fh5.close()
def DMRG_COMPRESS_NEVPT(mc, maxM=500, root=0, nevptsolver=None, tol=1e-7):
    """Run the compressed-perturber DMRG-NEVPT2 calculation for state *root*.

    *mc* is either a CASSCF-like object or the path of a chkfile previously
    written by ``write_chk``.  The work is delegated to ``nevpt_mpi.py`` via
    a subprocess; the Sr/Si subspace energies are read back from the
    ``Perturbation_<root>`` HDF5 file it produces and logged.
    """
    if (isinstance(mc, str)):
        # mc is a chkfile path: read the dimensions stored by write_chk.
        mol = chkfile.load_mol(mc)
        fh5 = h5py.File(mc, 'r')
        ncas = fh5['mc/ncas'].value
        ncore = fh5['mc/ncore'].value
        nvirt = fh5['mc/nvirt'].value
        nelecas = fh5['mc/nelecas'].value
        nroots = fh5['mc/nroots'].value
        wfnsym = fh5['mc/wfnsym'].value
        fh5.close()
        mc_chk = mc
    else:
        # mc is a live CASSCF object: take the dimensions from it and dump
        # the perturbation integrals to a fresh chkfile.
        mol = mc.mol
        ncas = mc.ncas
        ncore = mc.ncore
        nvirt = mc.mo_coeff.shape[1] - mc.ncas-mc.ncore
        nelecas = mc.nelecas
        nroots = mc.fcisolver.nroots
        wfnsym = mc.fcisolver.wfnsym
        mc_chk = 'nevpt_perturb_integral'
        write_chk(mc, root, mc_chk)
    if nevptsolver is None:
        nevptsolver = default_nevpt_schedule(mol, maxM, tol)
        nevptsolver.wfnsym = wfnsym
        nevptsolver.block_extra_keyword = mc.fcisolver.block_extra_keyword
    nevptsolver.nroots = nroots
    from pyscf.dmrgscf import settings
    nevptsolver.executable = settings.BLOCKEXE_COMPRESS_NEVPT
    # Temporarily blank scratchDirectory so the generated conf file carries
    # no 'prefix' line; restore it afterwards.
    scratch = nevptsolver.scratchDirectory
    nevptsolver.scratchDirectory = ''
    dmrgci.writeDMRGConfFile(nevptsolver, nelecas, False,
                             with_2pdm=False,
                             extraline=['fullrestart',
                                        'nevpt_state_num %d' % root])
    nevptsolver.scratchDirectory = scratch
    if nevptsolver.verbose >= logger.DEBUG1:
        inFile = os.path.join(nevptsolver.runtimeDir, nevptsolver.configFile)
        logger.debug1(nevptsolver, 'Block Input conf')
        logger.debug1(nevptsolver, open(inFile, 'r').read())
    t0 = (time.clock(), time.time())
    # Launch nevpt_mpi.py (which calls nevpt_integral_mpi) via mpiprefix.
    # NOTE(review): unlike the sibling revision, configFile is passed here
    # without joining it onto runtimeDir -- confirm the relative path is
    # resolvable from the subprocess working directory.
    cmd = ' '.join((nevptsolver.mpiprefix,
                    '%s/nevpt_mpi.py' % os.path.dirname(os.path.realpath(__file__)),
                    mc_chk,
                    nevptsolver.executable,
                    nevptsolver.configFile,
                    nevptsolver.outputFile,
                    nevptsolver.scratchDirectory))
    logger.debug(nevptsolver, 'DMRG_COMPRESS_NEVPT cmd %s', cmd)
    try:
        output = subprocess.check_call(cmd, shell=True)
    except subprocess.CalledProcessError as err:
        logger.error(nevptsolver, cmd)
        raise err
    if nevptsolver.verbose >= logger.DEBUG1:
        logger.debug1(nevptsolver,
                      open(os.path.join(nevptsolver.scratchDirectory,
                                        '0/dmrg.out')).read())
    # Collect the Sr/Si subspace results written by the subprocess.
    fh5 = h5py.File('Perturbation_%d' % root, 'r')
    Vi_e = fh5['Vi/energy'].value
    Vr_e = fh5['Vr/energy'].value
    fh5.close()
    logger.note(nevptsolver, 'Nevpt Energy:')
    logger.note(nevptsolver, 'Sr Subspace: E = %.14f' % (Vr_e))
    logger.note(nevptsolver, 'Si Subspace: E = %.14f' % (Vi_e))
    logger.timer(nevptsolver, 'MPS NEVPT calculation time', *t0)
def DMRG_COMPRESS_NEVPT(mc, maxM=500, root=0, nevptsolver=None, tol=1e-7,
                        nevpt_integral=None):
    """Run the compressed-perturber DMRG-NEVPT2 calculation for state *root*.

    If *nevpt_integral* is a path to an existing HDF5 integral file (written
    by ``write_chk``), the dimensions are read from it; otherwise they are
    taken from the live *mc* object and the integral file is generated in the
    NEVPT scratch directory.  ``nevpt_mpi.py`` is then launched as a
    subprocess and the Sr/Si energies are read back from the
    ``Perturbation_<root>`` file it writes.  Returns that file's path.

    Fixes vs. previous revision: the ``logger.warn`` call now supplies the
    two executable paths its ``%s`` placeholders expect (they were printed
    literally before), and the integral file is opened explicitly in append
    mode ('a'), which the write below requires.
    """
    if isinstance(nevpt_integral, str) and h5py.is_hdf5(nevpt_integral):
        nevpt_integral_file = os.path.abspath(nevpt_integral)
        mol = chkfile.load_mol(nevpt_integral_file)
        fh5 = h5py.File(nevpt_integral_file, 'r')
        ncas = fh5['mc/ncas'].value
        ncore = fh5['mc/ncore'].value
        nvirt = fh5['mc/nvirt'].value
        nelecas = fh5['mc/nelecas'].value
        nroots = fh5['mc/nroots'].value
        wfnsym = fh5['mc/wfnsym'].value
        fh5.close()
    else:
        mol = mc.mol
        ncas = mc.ncas
        ncore = mc.ncore
        nvirt = mc.mo_coeff.shape[1] - mc.ncas - mc.ncore
        nelecas = mc.nelecas
        nroots = mc.fcisolver.nroots
        wfnsym = mc.fcisolver.wfnsym
        # Deferred: generated below, once the scratch directory is known.
        nevpt_integral_file = None
    if nevptsolver is None:
        nevptsolver = default_nevpt_schedule(mc.fcisolver, maxM, tol)
        #nevptsolver.__dict__.update(mc.fcisolver.__dict__)
        nevptsolver.wfnsym = wfnsym
        nevptsolver.block_extra_keyword = mc.fcisolver.block_extra_keyword
    nevptsolver.nroots = nroots
    nevptsolver.executable = settings.BLOCKEXE_COMPRESS_NEVPT
    # Sharing one executable between the DMRG solver and the NEVPT solver is
    # dangerous when both are MPI builds -- warn, do not abort.
    if nevptsolver.executable == getattr(mc.fcisolver, 'executable', None):
        logger.warn(mc, 'DMRG executable file for nevptsolver %s is the same '
                    'to the executable file for DMRG solver %s. If they are '
                    'both compiled by MPI compilers, they may cause error or '
                    'random results in DMRG-NEVPT calculation.',
                    nevptsolver.executable, mc.fcisolver.executable)
    nevpt_scratch = os.path.abspath(nevptsolver.scratchDirectory)
    dmrg_scratch = os.path.abspath(mc.fcisolver.scratchDirectory)

    # Integrals are not given by the kwarg nevpt_integral
    if nevpt_integral_file is None:
        nevpt_integral_file = os.path.join(nevpt_scratch,
                                           'nevpt_perturb_integral')
        write_chk(mc, root, nevpt_integral_file)

    conf = dmrgci.writeDMRGConfFile(
        nevptsolver, nelecas, False, with_2pdm=False,
        extraline=['fullrestart', 'nevpt_state_num %d' % root])
    with open(conf, 'r') as f:
        block_conf = f.readlines()
    # Drop the 'prefix' line so the subprocess chooses its own scratch.
    block_conf = [l for l in block_conf if 'prefix' not in l]
    block_conf = ''.join(block_conf)
    # Embed the Block conf in the integral file for the subprocess.
    with h5py.File(nevpt_integral_file, 'a') as fh5:
        if 'dmrg.conf' in fh5:
            del (fh5['dmrg.conf'])
        fh5['dmrg.conf'] = block_conf
    if nevptsolver.verbose >= logger.DEBUG1:
        logger.debug1(nevptsolver, 'Block Input conf')
        logger.debug1(nevptsolver, block_conf)

    t0 = (time.clock(), time.time())
    # function nevpt_integral_mpi is called in this cmd
    cmd = ' '.join(
        (nevptsolver.mpiprefix,
         os.path.realpath(os.path.join(__file__, '..', 'nevpt_mpi.py')),
         nevpt_integral_file,
         nevptsolver.executable,
         dmrg_scratch, nevpt_scratch))
    logger.debug(nevptsolver, 'DMRG_COMPRESS_NEVPT cmd %s', cmd)
    try:
        output = subprocess.check_call(cmd, shell=True)
    except subprocess.CalledProcessError as err:
        logger.error(nevptsolver, cmd)
        raise err

    if nevptsolver.verbose >= logger.DEBUG1:
        logger.debug1(
            nevptsolver,
            open(os.path.join(nevpt_scratch, '0', 'dmrg.out')).read())

    # Collect the Sr/Si subspace results written by the subprocess.
    perturb_file = os.path.join(nevpt_scratch, '0', 'Perturbation_%d' % root)
    fh5 = h5py.File(perturb_file, 'r')
    Vi_e = fh5['Vi/energy'].value
    Vr_e = fh5['Vr/energy'].value
    fh5.close()
    logger.note(nevptsolver, 'Nevpt Energy:')
    logger.note(nevptsolver, 'Sr Subspace: E = %.14f' % (Vr_e))
    logger.note(nevptsolver, 'Si Subspace: E = %.14f' % (Vi_e))
    logger.timer(nevptsolver, 'MPS NEVPT calculation time', *t0)
    return perturb_file