def psi4_clean():
    """Function to put Psi4 back to a clean state.

    In particular deletes scratch files and resets Psi variables.
    Call as last part of the real deriv call to Psi4.
    """
    psi4.clean_variables()
    psi4.clean()
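# Minimal usage sketch (assumed workflow, not from the source): psi4_clean() is meant
# to run after each real derivative/energy call so that scratch files and PSI
# variables from one step do not leak into the next.
import psi4

def displaced_energy(mol_string):
    """Hypothetical helper: compute one displaced-geometry energy, then reset Psi4."""
    mol = psi4.geometry(mol_string)
    e = psi4.energy('scf/cc-pvdz', molecule=mol)
    psi4_clean()
    return e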
def __init__(self, molecule, basis, numpy_memory=2.e9, scf_type="PK", use_c=False):
    # Set defaults
    maxiter = 40
    E_conv = 1.0E-6
    D_conv = 1.0E-3

    # Integral generation from Psi4's MintsHelper
    start_time = time.time()
    self.basis_name = basis
    self.molecule = molecule
    self.basis = pc.BasisSet.build(self.molecule, "ORBITAL", basis)
    self.mints = pc.MintsHelper(self.basis)
    self.S = np.asarray(self.mints.ao_overlap())

    # Get nbf and ndocc for closed shell molecules
    self.nbf = self.S.shape[0]
    self.nel = sum(self.molecule.Z(n) for n in range(self.molecule.natom()))
    self.nel -= self.molecule.molecular_charge()

    if not (self.nel / 2.0).is_integer():
        raise ValueError("RHF: Molecule did not have an even number of electrons!")

    self.ndocc = int(self.nel / 2.0)

    print('\nNumber of occupied orbitals: %d' % self.ndocc)
    print('Number of basis functions: %d' % self.nbf)

    # Run a quick check to make sure everything will fit into memory
    I_Size = (self.nbf**4) * 8.e-9   # ERI tensor size in GB (8 bytes per element)
    print("\nSize of the ERI tensor will be %4.2f GB." % I_Size)

    # Estimate memory usage and compare against the numpy_memory limit,
    # which is given in bytes (default 2.e9 = 2 GB)
    memory_footprint = I_Size * 1.5
    if memory_footprint > numpy_memory * 1.e-9:
        pc.clean()
        raise Exception("Estimated memory utilization (%4.2f GB) exceeds numpy_memory "
                        "limit of %4.2f GB." % (memory_footprint, numpy_memory * 1.e-9))

    # Compute required quantities for SCF
    self.V = np.asarray(self.mints.ao_potential())
    self.T = np.asarray(self.mints.ao_kinetic())
    # self.I = np.asarray(self.mints.ao_eri())
    self.JK = jk.build_JK(self.molecule, self.basis_name, scf_type, use_c)

    self.Enuc = self.molecule.nuclear_repulsion_energy()

    print('\nTotal time taken for integrals: %.3f seconds.' % (time.time() - start_time))

    t = time.time()

    # Build H_core
    self.H = self.T + self.V

    # Orthogonalizer A = S^(-1/2) using Psi4's matrix power.
    A = self.mints.ao_overlap()
    A.power(-0.5, 1.e-16)
    self.A = np.asarray(A)

    print('\nTotal time taken for setup: %.3f seconds' % (time.time() - start_time))
def _clean_psi_environ(do_clean: bool):
    """Reset work environment to new Psi4 instance state.

    This includes global variables (P::e.globals, P::e.arrays, P::e.options)
    and any non-explicitly-retained PSIO-managed scratch files.
    """
    if do_clean:
        core.clean_variables()
        core.clean_options()
        core.clean()
def compute_energy(self, maxiter=12, E_conv=1.e-6, D_conv=1.e-4):
    print('\nStarting SCF iterations:\n')
    t = time.time()
    E = 0.0
    Eold = 0.0
    Dold = np.zeros_like(self.H)

    self.form_orbitals(self.H)

    for SCF_ITER in range(1, maxiter + 1):

        # Build Fock matrix
        self.build_fock(self.D)

        # Build DIIS error vector
        diis_e = np.dot(self.F, self.D).dot(self.S) - np.dot(self.S, self.D).dot(self.F)

        # Make sure that error is normalized!
        diis_e = (self.A).dot(diis_e).dot(self.A)

        # SCF energy and update
        SCF_E = np.einsum('pq,pq->', self.F + self.H, self.D) + self.Enuc
        dRMS = np.mean(diis_e**2)**0.5

        print('SCF Iteration %3d: Energy = %4.16f dE = % 1.5E dRMS = %1.5E'
              % (SCF_ITER, SCF_E, (SCF_E - Eold), dRMS))
        if (abs(SCF_E - Eold) < E_conv) and (dRMS < D_conv):
            break

        Eold = SCF_E
        Dold = self.D.copy()

        self.form_orbitals(self.F)

        if SCF_ITER == maxiter:
            pc.clean()
            raise Exception("Maximum number of SCF cycles exceeded.")

    print('Total time for SCF iterations: %.3f seconds \n' % (time.time() - t))
    print('Final SCF energy: %.8f hartree' % SCF_E)
    self.SCF_E = SCF_E
    return SCF_E
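# Hedged usage sketch for the SCF helper whose __init__ and compute_energy appear
# above; the class name `helper_HF` is an assumption for illustration, as is the
# availability of psi4/psi4.core (pc) and the jk module in scope.
import psi4

h2o = psi4.geometry("""
O
H 1 0.96
H 1 0.96 2 104.5
""")

hf = helper_HF(h2o, "cc-pVDZ", scf_type="PK", use_c=False)   # hypothetical class name
scf_e = hf.compute_energy(maxiter=25, E_conv=1.e-8, D_conv=1.e-6)
print('RHF energy: %.10f' % scf_e)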
def __exit__(self, type, value, traceback):
    for calc in (calcid(*x) for x in itertools.product(('m1', 'm2', 'd'),
                                                       ('m', 'd'),
                                                       ('low', 'high'))):
        core.IO.set_default_namespace(self.fmt_ns(calc))
        core.IOManager.shared_object().set_specific_retention(psif.PSIF_DFSCF_BJ, False)

    try:
        extras.clean_numpy_files()
    except OSError:
        pass
    extras.numpy_files = []

    core.clean()
    os.chdir(self._original_path)
def compute(self, mol_name='m1', basis_center='m', basis_quality='low',
            mp2=False, mp2_dm=False, save_jk=False):
    calc = calcid(mol_name, basis_center, basis_quality)
    molecule = self.molecule(calc)
    molecule.set_name(self.fmt_ns(calc))
    basis = self.basis_sets[basis_quality]
    self._banner(calc, mp2, mp2_dm)

    optstash = p4util.optproc.OptionsState(['SCF', 'DF_INTS_IO'],
                                           ['DF_INTS_IO'],
                                           ['SCF', 'GUESS'])
    self._init_ns(calc)
    self._init_df(calc)

    with psiopts(
            'SCF_TYPE DF',
            'MP2_TYPE DF',
            'BASIS %s' % basis,
            'DF_BASIS_SCF %s-JKFIT' % basis,
            'DF_BASIS_MP2 %s-RI' % basis,
            'SCF SAVE_JK %s' % save_jk,
            'ONEPDM TRUE',
            'NUM_FROZEN_DOCC %d' % nfrozen_core(molecule),
    ):
        wfn = scf_helper('scf', molecule=molecule)
        assert nfrozen_core(molecule) == wfn.nfrzc()

        if mp2 and not mp2_dm:
            wfn = run_dfmp2('df-mp2', molecule=molecule, ref_wfn=wfn)
        if mp2 and mp2_dm:
            wfn = run_dfmp2_gradient('df-mp2', molecule=molecule, ref_wfn=wfn)
        if mp2_dm and not mp2:
            raise ValueError("These options don't make sense")

    self.wfn_cache[calc] = wfn
    optstash.restore()
    core.clean()
    return wfn
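# Illustrative (and heavily assumed) driver for the caching compute() above: the
# owning class name, its constructor, and the context-manager behaviour are inferred
# from the __exit__ method earlier in this collection and are not defined here.
with FragmentCalcCache(dimer) as cache:          # hypothetical class providing compute()
    wfn_m1 = cache.compute('m1', 'm', 'low')              # monomer 1, monomer basis
    wfn_d = cache.compute('d', 'd', 'high', mp2=True)     # dimer, dimer basis, DF-MP2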
def fd_db_run(self, executable, nThreads=1, jobs_in_progress=False):
    self.pr('(ROA) Running finite-difference computations.\n')
    cwd = os.getcwd() + '/'
    # Want to change to json later.

    def runDisp(subDir):
        rootDir = os.getcwd() + '/'
        self.pr("Running displacement %s\n" % subDir)
        rc = subprocess.run(executable, cwd=rootDir + '/' + subDir)
        if rc.returncode != 0:
            raise RuntimeError("Tensor calculation failed.")

    def batch(batch_list, batch_size):
        """Yield successive batches of at most batch_size elements."""
        b = []
        for elem in batch_list:
            b.append(elem)
            if len(b) == batch_size:
                yield b
                b = []
        if b:
            yield b

    self.update_status_fd_db(print_status=True, jobs_in_progress=False)
    todo = [
        lbl for lbl, info in self.db['jobs'].items()
        if info['status'] == 'not_started'
    ]
    self.pr('(ROA) Remaining jobs todo: {}\n'.format(str(todo)))

    fd_threads = []
    for b in batch(todo, nThreads):
        fd_threads.clear()
        for job_lbl in b:
            t = threading.Thread(target=runDisp, args=(job_lbl, ))
            fd_threads.append(t)
            t.start()
        for t in fd_threads:
            t.join()
    core.clean()
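# Standalone sketch of the batch() pattern used above: jobs run nThreads at a time and
# each batch is joined before the next starts. The displacement labels are invented.
def _batch(items, size):
    b = []
    for elem in items:
        b.append(elem)
        if len(b) == size:
            yield b
            b = []
    if b:
        yield b

assert list(_batch(['1_x_p', '1_x_m', '2_y_p'], 2)) == [['1_x_p', '1_x_m'], ['2_y_p']]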
def database(name, db_name, **kwargs): r"""Function to access the molecule objects and reference energies of popular chemical databases. :aliases: db() :returns: (*float*) Mean absolute deviation of the database in kcal/mol :PSI variables: .. hlist:: :columns: 1 * :psivar:`db_name DATABASE MEAN SIGNED DEVIATION` * :psivar:`db_name DATABASE MEAN ABSOLUTE DEVIATION` * :psivar:`db_name DATABASE ROOT-MEAN-SQUARE DEVIATION` * Python dictionaries of results accessible as ``DB_RGT`` and ``DB_RXN``. .. note:: It is very easy to make a database from a collection of xyz files using the script :source:`psi4/share/psi4/scripts/ixyz2database.py`. See :ref:`sec:createDatabase` for details. .. caution:: Some features are not yet implemented. Buy a developer some coffee. - In sow/reap mode, use only global options (e.g., the local option set by ``set scf scf_type df`` will not be respected). .. note:: To access a database that is not embedded in a |PSIfour| distribution, add the path to the directory containing the database to the environment variable :envvar:`PYTHONPATH`. :type name: str :param name: ``'scf'`` || ``'sapt0'`` || ``'ccsd(t)'`` || etc. First argument, usually unlabeled. Indicates the computational method to be applied to the database. May be any valid argument to :py:func:`psi4.driver.energy`. :type db_name: str :param db_name: ``'BASIC'`` || ``'S22'`` || ``'HTBH'`` || etc. Second argument, usually unlabeled. Indicates the requested database name, matching (case insensitive) the name of a python file in ``psi4/share/databases`` or :envvar:`PYTHONPATH`. Consult that directory for available databases and literature citations. :type func: :ref:`function <op_py_function>` :param func: |dl| ``energy`` |dr| || ``optimize`` || ``cbs`` Indicates the type of calculation to be performed on each database member. The default performs a single-point ``energy('name')``, while ``optimize`` perfoms a geometry optimization on each reagent, and ``cbs`` performs a compound single-point energy. If a nested series of python functions is intended (see :ref:`sec:intercalls`), use keyword ``db_func`` instead of ``func``. :type mode: str :param mode: |dl| ``'continuous'`` |dr| || ``'sow'`` || ``'reap'`` Indicates whether the calculations required to complete the database are to be run in one file (``'continuous'``) or are to be farmed out in an embarrassingly parallel fashion (``'sow'``/``'reap'``). For the latter, run an initial job with ``'sow'`` and follow instructions in its output file. :type cp: :ref:`boolean <op_py_boolean>` :param cp: ``'on'`` || |dl| ``'off'`` |dr| Indicates whether counterpoise correction is employed in computing interaction energies. Use this option and NOT the ``bsse_type="cp"`` function for BSSE correction in database(). Option available (See :ref:`sec:availableDatabases`) only for databases of bimolecular complexes. :type rlxd: :ref:`boolean <op_py_boolean>` :param rlxd: ``'on'`` || |dl| ``'off'`` |dr| Indicates whether correction for deformation energy is employed in computing interaction energies. Option available (See :ref:`sec:availableDatabases`) only for databases of bimolecular complexes with non-frozen monomers, e.g., HBC6. :type symm: :ref:`boolean <op_py_boolean>` :param symm: |dl| ``'on'`` |dr| || ``'off'`` Indicates whether the native symmetry of the database reagents is employed (``'on'``) or whether it is forced to :math:`C_1` symmetry (``'off'``). Some computational methods (e.g., SAPT) require no symmetry, and this will be set by database(). 
:type zpe: :ref:`boolean <op_py_boolean>` :param zpe: ``'on'`` || |dl| ``'off'`` |dr| Indicates whether zero-point-energy corrections are appended to single-point energy values. Option valid only for certain thermochemical databases. Disabled until Hessians ready. :type benchmark: str :param benchmark: |dl| ``'default'`` |dr| || ``'S22A'`` || etc. Indicates whether a non-default set of reference energies, if available (See :ref:`sec:availableDatabases`), are employed for the calculation of error statistics. :type tabulate: List[str] :param tabulate: |dl| ``[]`` |dr| || ``['scf total energy', 'natom']`` || etc. Indicates whether to form tables of variables other than the primary requested energy. Available for any PSI variable. :type subset: Union[str, List[str]] :param subset: Indicates a subset of the full database to run. This is a very flexible option and can be used in three distinct ways, outlined below. Note that two take a string and the last takes an array. See :ref:`sec:availableDatabases` for available values. * ``'small'`` || ``'large'`` || ``'equilibrium'`` Calls predefined subsets of the requested database, either ``'small'``, a few of the smallest database members, ``'large'``, the largest of the database members, or ``'equilibrium'``, the equilibrium geometries for a database composed of dissociation curves. * ``'BzBz_S'`` || ``'FaOOFaON'`` || ``'ArNe'`` || ``'HB'`` || etc. For databases composed of dissociation curves, or otherwise divided into subsets, individual curves and subsets can be called by name. Consult the database python files for available molecular systems (case insensitive). * ``[1,2,5]`` || ``['1','2','5']`` || ``['BzMe-3.5', 'MeMe-5.0']`` || etc. Specify a list of database members to run. Consult the database python files for available molecular systems. This is the only portion of database input that is case sensitive; choices for this keyword must match the database python file. :examples: >>> # [1] Two-stage SCF calculation on short, equilibrium, and long helium dimer >>> db('scf','RGC10',cast_up='sto-3g',subset=['HeHe-0.85','HeHe-1.0','HeHe-1.5'], tabulate=['scf total energy','natom']) >>> # [2] Counterpoise-corrected interaction energies for three complexes in S22 >>> # Error statistics computed wrt an old benchmark, S22A >>> database('mp2','S22',cp=1,subset=[16,17,8],benchmark='S22A') >>> # [3] SAPT0 on the neon dimer dissociation curve >>> db('sapt0',subset='NeNe',cp=0,symm=0,db_name='RGC10') >>> # [4] Optimize system 1 in database S22, producing tables of scf and mp2 energy >>> db('mp2','S22',db_func=optimize,subset=[1], tabulate=['mp2 total energy','current energy']) >>> # [5] CCSD on the smallest systems of HTBH, a hydrogen-transfer database >>> database('ccsd','HTBH',subset='small', tabulate=['ccsd total energy', 'mp2 total energy']) """ lowername = name #TODO kwargs = p4util.kwargs_lower(kwargs) # Wrap any positional arguments into kwargs (for intercalls among wrappers) if not ('name' in kwargs) and name: kwargs['name'] = name #.lower() if not ('db_name' in kwargs) and db_name: kwargs['db_name'] = db_name # Establish function to call func = kwargs.pop('db_func', kwargs.pop('func', energy)) kwargs['db_func'] = func # Bounce to CP if bsse kwarg (someday) if kwargs.get('bsse_type', None) is not None: raise ValidationError( """Database: Cannot specify bsse_type for database. 
Use the cp keyword withing database instead.""" ) allowoptexceeded = kwargs.get('allowoptexceeded', False) optstash = p4util.OptionsState(['WRITER_FILE_LABEL'], ['SCF', 'REFERENCE']) # Wrapper wholly defines molecule. discard any passed-in kwargs.pop('molecule', None) # Paths to search for database files: here + PSIPATH + library + PYTHONPATH db_paths = [] db_paths.append(os.getcwd()) db_paths.extend(os.environ.get('PSIPATH', '').split(os.path.pathsep)) db_paths.append(os.path.join(core.get_datadir(), 'databases')) db_paths.append(os.path.dirname(__file__)) db_paths = list(map(os.path.abspath, db_paths)) sys.path[1:1] = db_paths # TODO this should be modernized a la interface_cfour # Define path and load module for requested database database = p4util.import_ignorecase(db_name) if database is None: core.print_out('\nPython module for database %s failed to load\n\n' % (db_name)) core.print_out('\nSearch path that was tried:\n') core.print_out(", ".join(map(str, sys.path))) raise ValidationError("Python module loading problem for database " + str(db_name)) else: dbse = database.dbse HRXN = database.HRXN ACTV = database.ACTV RXNM = database.RXNM BIND = database.BIND TAGL = database.TAGL GEOS = database.GEOS try: DATA = database.DATA except AttributeError: DATA = {} user_writer_file_label = core.get_global_option('WRITER_FILE_LABEL') user_reference = core.get_global_option('REFERENCE') # Configuration based upon e_name & db_name options # Force non-supramolecular if needed if not hasattr(lowername, '__call__') and re.match(r'^.*sapt', lowername): try: database.ACTV_SA except AttributeError: raise ValidationError( 'Database %s not suitable for non-supramolecular calculation.' % (db_name)) else: ACTV = database.ACTV_SA # Force open-shell if needed openshell_override = 0 if user_reference in ['RHF', 'RKS']: try: database.isOS except AttributeError: pass else: if p4util.yes.match(str(database.isOS)): openshell_override = 1 core.print_out( '\nSome reagents in database %s require an open-shell reference; will be reset to UHF/UKS as needed.\n' % (db_name)) # Configuration based upon database keyword options # Option symmetry- whether symmetry treated normally or turned off (currently req'd for dfmp2 & dft) db_symm = kwargs.get('symm', True) symmetry_override = 0 if db_symm is False: symmetry_override = 1 elif db_symm is True: pass else: raise ValidationError("""Symmetry mode '%s' not valid.""" % (db_symm)) # Option mode of operation- whether db run in one job or files farmed out db_mode = kwargs.pop('db_mode', kwargs.pop('mode', 'continuous')).lower() kwargs['db_mode'] = db_mode if db_mode == 'continuous': pass elif db_mode == 'sow': pass elif db_mode == 'reap': db_linkage = kwargs.get('linkage', None) if db_linkage is None: raise ValidationError( """Database execution mode 'reap' requires a linkage option.""" ) else: raise ValidationError("""Database execution mode '%s' not valid.""" % (db_mode)) # Option counterpoise- whether for interaction energy databases run in bsse-corrected or not db_cp = kwargs.get('cp', False) if db_cp is True: try: database.ACTV_CP except AttributeError: raise ValidationError( """Counterpoise correction mode 'yes' invalid for database %s.""" % (db_name)) else: ACTV = database.ACTV_CP elif db_cp is False: pass else: raise ValidationError( """Counterpoise correction mode '%s' not valid.""" % (db_cp)) # Option relaxed- whether for non-frozen-monomer interaction energy databases include deformation correction or not? 
db_rlxd = kwargs.get('rlxd', False) if db_rlxd is True: if db_cp is True: try: database.ACTV_CPRLX database.RXNM_CPRLX except AttributeError: raise ValidationError( 'Deformation and counterpoise correction mode \'yes\' invalid for database %s.' % (db_name)) else: ACTV = database.ACTV_CPRLX RXNM = database.RXNM_CPRLX elif db_cp is False: try: database.ACTV_RLX except AttributeError: raise ValidationError( 'Deformation correction mode \'yes\' invalid for database %s.' % (db_name)) else: ACTV = database.ACTV_RLX elif db_rlxd is False: #elif no.match(str(db_rlxd)): pass else: raise ValidationError('Deformation correction mode \'%s\' not valid.' % (db_rlxd)) # Option zero-point-correction- whether for thermochem databases jobs are corrected by zpe db_zpe = kwargs.get('zpe', False) if db_zpe is True: raise ValidationError( 'Zero-point-correction mode \'yes\' not yet implemented.') elif db_zpe is False: pass else: raise ValidationError('Zero-point-correction \'mode\' %s not valid.' % (db_zpe)) # Option benchmark- whether error statistics computed wrt alternate reference energies db_benchmark = 'default' if 'benchmark' in kwargs: db_benchmark = kwargs['benchmark'] if db_benchmark.lower() == 'default': pass else: BIND = p4util.getattr_ignorecase(database, 'BIND_' + db_benchmark) if BIND is None: raise ValidationError( 'Special benchmark \'%s\' not available for database %s.' % (db_benchmark, db_name)) # Option tabulate- whether tables of variables other than primary energy method are formed # TODO db(func=cbs,tabulate=[non-current-energy]) # broken db_tabulate = [] if 'tabulate' in kwargs: db_tabulate = kwargs['tabulate'] # Option subset- whether all of the database or just a portion is run db_subset = HRXN if 'subset' in kwargs: db_subset = kwargs['subset'] if isinstance(db_subset, (str, bytes)): if db_subset.lower() == 'small': try: database.HRXN_SM except AttributeError: raise ValidationError( """Special subset 'small' not available for database %s.""" % (db_name)) else: HRXN = database.HRXN_SM elif db_subset.lower() == 'large': try: database.HRXN_LG except AttributeError: raise ValidationError( """Special subset 'large' not available for database %s.""" % (db_name)) else: HRXN = database.HRXN_LG elif db_subset.lower() == 'equilibrium': try: database.HRXN_EQ except AttributeError: raise ValidationError( """Special subset 'equilibrium' not available for database %s.""" % (db_name)) else: HRXN = database.HRXN_EQ else: HRXN = p4util.getattr_ignorecase(database, db_subset) if HRXN is None: HRXN = p4util.getattr_ignorecase(database, 'HRXN_' + db_subset) if HRXN is None: raise ValidationError( """Special subset '%s' not available for database %s.""" % (db_subset, db_name)) else: temp = [] for rxn in db_subset: if rxn in HRXN: temp.append(rxn) else: raise ValidationError( """Subset element '%s' not a member of database %s.""" % (str(rxn), db_name)) HRXN = temp temp = [] for rxn in HRXN: temp.append(ACTV['%s-%s' % (dbse, rxn)]) HSYS = p4util.drop_duplicates(sum(temp, [])) # Sow all the necessary reagent computations core.print_out("\n\n") p4util.banner(("Database %s Computation" % (db_name))) core.print_out("\n") # write index of calcs to output file instructions = """\n The database single-job procedure has been selected through mode='continuous'.\n""" instructions += """ Calculations for the reagents will proceed in the order below and will be followed\n""" instructions += """ by summary results for the database.\n\n""" for rgt in HSYS: instructions += """ %-s\n""" % (rgt) core.print_out(instructions) 
# Loop through chemical systems ERGT = {} ERXN = {} VRGT = {} VRXN = {} for rgt in HSYS: VRGT[rgt] = {} core.print_out('\n') p4util.banner(' Database {} Computation: Reagent {} \n {}'.format( db_name, rgt, TAGL[rgt])) core.print_out('\n') molecule = core.Molecule.from_dict(GEOS[rgt].to_dict()) molecule.set_name(rgt) molecule.update_geometry() if symmetry_override: molecule.reset_point_group('c1') molecule.fix_orientation(True) molecule.fix_com(True) molecule.update_geometry() if (openshell_override) and (molecule.multiplicity() != 1): if user_reference == 'RHF': core.set_global_option('REFERENCE', 'UHF') elif user_reference == 'RKS': core.set_global_option('REFERENCE', 'UKS') core.set_global_option( 'WRITER_FILE_LABEL', user_writer_file_label + ('' if user_writer_file_label == '' else '-') + rgt) if allowoptexceeded: try: ERGT[rgt] = func(molecule=molecule, **kwargs) except ConvergenceError: core.print_out(f"Optimization exceeded cycles for {rgt}") ERGT[rgt] = 0.0 else: ERGT[rgt] = func(molecule=molecule, **kwargs) core.print_variables() core.print_out(" Database Contributions Map:\n {}\n".format('-' * 75)) for rxn in HRXN: db_rxn = dbse + '-' + str(rxn) if rgt in ACTV[db_rxn]: core.print_out( ' reagent {} contributes by {:.4f} to reaction {}\n'. format(rgt, RXNM[db_rxn][rgt], db_rxn)) core.print_out('\n') for envv in db_tabulate: VRGT[rgt][envv.upper()] = core.variable(envv) core.set_global_option("REFERENCE", user_reference) core.clean() #core.opt_clean() core.clean_variables() # Reap all the necessary reaction computations core.print_out("\n") p4util.banner(("Database %s Results" % (db_name))) core.print_out("\n") maxactv = [] for rxn in HRXN: maxactv.append(len(ACTV[dbse + '-' + str(rxn)])) maxrgt = max(maxactv) table_delimit = '-' * (62 + 20 * maxrgt) tables = '' # find any reactions that are incomplete FAIL = collections.defaultdict(int) for rxn in HRXN: db_rxn = dbse + '-' + str(rxn) for i in range(len(ACTV[db_rxn])): if abs(ERGT[ACTV[db_rxn][i]]) < 1.0e-12: if not allowoptexceeded: FAIL[rxn] = 1 # tabulate requested process::environment variables tables += """ For each VARIABLE requested by tabulate, a 'Reaction Value' will be formed from\n""" tables += """ 'Reagent' values according to weightings 'Wt', as for the REQUESTED ENERGY below.\n""" tables += """ Depending on the nature of the variable, this may or may not make any physical sense.\n""" for rxn in HRXN: db_rxn = dbse + '-' + str(rxn) VRXN[db_rxn] = {} for envv in db_tabulate: envv = envv.upper() tables += """\n ==> %s <==\n\n""" % (envv.title()) tables += _tblhead(maxrgt, table_delimit, 2) for rxn in HRXN: db_rxn = dbse + '-' + str(rxn) if FAIL[rxn]: tables += """\n%23s %8s %8s %8s %8s""" % (db_rxn, '', '****', '', '') for i in range(len(ACTV[db_rxn])): tables += """ %16.8f %2.0f""" % (VRGT[ ACTV[db_rxn][i]][envv], RXNM[db_rxn][ACTV[db_rxn][i]]) else: VRXN[db_rxn][envv] = 0.0 for i in range(len(ACTV[db_rxn])): VRXN[db_rxn][envv] += VRGT[ ACTV[db_rxn][i]][envv] * RXNM[db_rxn][ACTV[db_rxn][i]] tables += """\n%23s %16.8f """ % ( db_rxn, VRXN[db_rxn][envv]) for i in range(len(ACTV[db_rxn])): tables += """ %16.8f %2.0f""" % (VRGT[ ACTV[db_rxn][i]][envv], RXNM[db_rxn][ACTV[db_rxn][i]]) tables += """\n %s\n""" % (table_delimit) # tabulate primary requested energy variable with statistics count_rxn = 0 minDerror = 100000.0 maxDerror = 0.0 MSDerror = 0.0 MADerror = 0.0 RMSDerror = 0.0 tables += """\n ==> %s <==\n\n""" % ('Requested Energy') tables += _tblhead(maxrgt, table_delimit, 1) for rxn in HRXN: db_rxn = dbse + '-' + str(rxn) 
if FAIL[rxn]: tables += """\n%23s %8.4f %8s %10s %10s""" % ( db_rxn, BIND[db_rxn], '****', '****', '****') for i in range(len(ACTV[db_rxn])): tables += """ %16.8f %2.0f""" % (ERGT[ACTV[db_rxn][i]], RXNM[db_rxn][ACTV[db_rxn][i]]) else: ERXN[db_rxn] = 0.0 for i in range(len(ACTV[db_rxn])): ERXN[db_rxn] += ERGT[ACTV[db_rxn][i]] * RXNM[db_rxn][ ACTV[db_rxn][i]] error = constants.hartree2kcalmol * ERXN[db_rxn] - BIND[db_rxn] tables += """\n%23s %8.4f %8.4f %10.4f %10.4f""" % ( db_rxn, BIND[db_rxn], constants.hartree2kcalmol * ERXN[db_rxn], error, error * constants.cal2J) for i in range(len(ACTV[db_rxn])): tables += """ %16.8f %2.0f""" % (ERGT[ACTV[db_rxn][i]], RXNM[db_rxn][ACTV[db_rxn][i]]) if abs(error) < abs(minDerror): minDerror = error if abs(error) > abs(maxDerror): maxDerror = error MSDerror += error MADerror += abs(error) RMSDerror += error * error count_rxn += 1 tables += """\n %s\n""" % (table_delimit) if count_rxn: MSDerror /= float(count_rxn) MADerror /= float(count_rxn) RMSDerror = math.sqrt(RMSDerror / float(count_rxn)) tables += """%23s %19s %10.4f %10.4f\n""" % ( 'Minimal Dev', '', minDerror, minDerror * constants.cal2J) tables += """%23s %19s %10.4f %10.4f\n""" % ( 'Maximal Dev', '', maxDerror, maxDerror * constants.cal2J) tables += """%23s %19s %10.4f %10.4f\n""" % ( 'Mean Signed Dev', '', MSDerror, MSDerror * constants.cal2J) tables += """%23s %19s %10.4f %10.4f\n""" % ( 'Mean Absolute Dev', '', MADerror, MADerror * constants.cal2J) tables += """%23s %19s %10.4f %10.4f\n""" % ( 'RMS Dev', '', RMSDerror, RMSDerror * constants.cal2J) tables += """ %s\n""" % (table_delimit) core.set_variable('%s DATABASE MEAN SIGNED DEVIATION' % (db_name), MSDerror) core.set_variable('%s DATABASE MEAN ABSOLUTE DEVIATION' % (db_name), MADerror) core.set_variable('%s DATABASE ROOT-MEAN-SQUARE DEVIATION' % (db_name), RMSDerror) core.print_out(tables) finalenergy = MADerror else: finalenergy = 0.0 optstash.restore() DB_RGT.clear() DB_RGT.update(VRGT) DB_RXN.clear() DB_RXN.update(VRXN) return finalenergy
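# Hedged follow-up to database(): after a run like the docstring's example [2], the
# summary statistics it sets can be read back as PSI variables (names per the
# ":PSI variables:" list in the docstring); 'S22' is just that example's label.
import psi4

mad = psi4.variable('S22 DATABASE MEAN ABSOLUTE DEVIATION')
rmsd = psi4.variable('S22 DATABASE ROOT-MEAN-SQUARE DEVIATION')
print(mad, rmsd)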
def compute_nbody_components(func, method_string, metadata): """Computes requested N-body components. Performs requested computations for psi4::Molecule object `molecule` according to `compute_list` with function `func` at `method_string` level of theory. Parameters ---------- func : {'energy', 'gradient', 'hessian'} Function object to be called within N-Body procedure. method_string : str Indicates level of theory to be passed to function `func`. metadata : dict of str Dictionary of N-body metadata. Required ``'key': value`` pairs: ``'compute_list'``: dict of int: set List of computations to perform. Keys indicate body-levels, e.g,. `compute_list[2]` is the list of all 2-body computations required. ``'kwargs'``: dict Arbitrary keyword arguments to be passed to function `func`. Returns ------- dict of str: dict Dictionary containing computed N-body components. Contents: ``'energies'``: dict of set: float64 Dictionary containing all energy components required for given N-body procedure. ``'ptype'``: dict of set: float64 or dict of set: psi4.Matrix Dictionary of returned quantities from calls of function `func` during N-body computations ``'intermediates'``: dict of str: float64 Dictionary of psivars for intermediate N-body computations to be set at the end of the N-body procedure. """ # Get required metadata kwargs = metadata['kwargs'] molecule = metadata['molecule'] #molecule = core.get_active_molecule() compute_list = metadata['compute_dict']['all'] # Now compute the energies energies_dict = {} gradients_dict = {} ptype_dict = {} intermediates_dict = {} if kwargs.get('charge_method', False) and not metadata['embedding_charges']: metadata['embedding_charges'] = driver_nbody_helper.compute_charges(kwargs['charge_method'], kwargs.get('charge_type', 'MULLIKEN_CHARGES').upper(), molecule) for count, n in enumerate(compute_list.keys()): core.print_out("\n ==> N-Body: Now computing %d-body complexes <==\n\n" % n) total = len(compute_list[n]) for num, pair in enumerate(compute_list[n]): core.print_out( "\n N-Body: Computing complex (%d/%d) with fragments %s in the basis of fragments %s.\n\n" % (num + 1, total, str(pair[0]), str(pair[1]))) ghost = list(set(pair[1]) - set(pair[0])) current_mol = molecule.extract_subsets(list(pair[0]), ghost) current_mol.set_name("%s_%i_%i" % (current_mol.name(), count, num)) if metadata['embedding_charges']: driver_nbody_helper.electrostatic_embedding(metadata, pair=pair) # Save energies info ptype_dict[pair], wfn = func(method_string, molecule=current_mol, return_wfn=True, **kwargs) core.set_global_option_python('EXTERN', None) energies_dict[pair] = core.variable("CURRENT ENERGY") gradients_dict[pair] = wfn.gradient() var_key = "N-BODY (%s)@(%s) TOTAL ENERGY" % (', '.join([str(i) for i in pair[0]]), ', '.join( [str(i) for i in pair[1]])) intermediates_dict[var_key] = core.variable("CURRENT ENERGY") core.print_out("\n N-Body: Complex Energy (fragments = %s, basis = %s: %20.14f)\n" % (str( pair[0]), str(pair[1]), energies_dict[pair])) # Flip this off for now, needs more testing #if 'cp' in bsse_type_list and (len(bsse_type_list) == 1): # core.set_global_option('DF_INTS_IO', 'LOAD') core.clean() return { 'energies': energies_dict, 'gradients': gradients_dict, 'ptype': ptype_dict, 'intermediates': intermediates_dict }
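# Hedged sketch of the metadata consumed by compute_nbody_components(); the fragment
# indices are invented. Note the body reads metadata['compute_dict']['all'] (keyed by
# n-body level, values are sets of (fragments, basis) index tuples), while 'kwargs',
# 'molecule' and 'embedding_charges' are used directly.
metadata = {
    'molecule': core.get_active_molecule(),
    'kwargs': {},                     # forwarded to func(method_string, ...)
    'embedding_charges': {},          # falsy -> no electrostatic embedding
    'compute_dict': {
        'all': {
            1: {((1,), (1, 2)), ((2,), (1, 2))},   # monomers in the dimer basis (CP-style)
            2: {((1, 2), (1, 2))},                 # the dimer itself
        }
    },
}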
def _clean_psi_environ(do_clean):
    if do_clean:
        psi4.core.clean_variables()
        psi4.core.clean_options()
        psi4.core.clean()
def nbody_gufunc(func, method_string, **kwargs): """ Computes the nbody interaction energy, gradient, or Hessian depending on input. This is a generalized univeral function for computing interaction quantities. :returns: *return type of func* |w--w| The interaction data. :returns: (*float*, :ref:`Wavefunction<sec:psimod_Wavefunction>`) |w--w| interaction data and wavefunction with energy/gradient/hessian set appropriately when **return_wfn** specified. :type func: function :param func: ``energy`` || etc. Python function that accepts method_string and a molecule. Returns a energy, gradient, or Hessian as requested. :type method_string: string :param method_string: ``'scf'`` || ``'mp2'`` || ``'ci5'`` || etc. First argument, lowercase and usually unlabeled. Indicates the computational method to be passed to func. :type molecule: :ref:`molecule <op_py_molecule>` :param molecule: ``h2o`` || etc. The target molecule, if not the last molecule defined. :type return_wfn: :ref:`boolean <op_py_boolean>` :param return_wfn: ``'on'`` || |dl| ``'off'`` |dr| Indicate to additionally return the :ref:`Wavefunction<sec:psimod_Wavefunction>` calculation result as the second element of a tuple. :type bsse_type: string or list :param bsse_type: ``'cp'`` || ``['nocp', 'vmfc']`` || |dl| ``None`` |dr| || etc. Type of BSSE correction to compute: CP, NoCP, or VMFC. The first in this list is returned by this function. By default, this function is not called. :type max_nbody: int :param max_nbody: ``3`` || etc. Maximum n-body to compute, cannot exceed the number of fragments in the moleucle. :type ptype: string :param ptype: ``'energy'`` || ``'gradient'`` || ``'hessian'`` Type of the procedure passed in. :type return_total_data: :ref:`boolean <op_py_boolean>` :param return_total_data: ``'on'`` || |dl| ``'off'`` |dr| If True returns the total data (energy/gradient/etc) of the system, otherwise returns interaction data. """ ### ==> Parse some kwargs <== kwargs = p4util.kwargs_lower(kwargs) return_wfn = kwargs.pop('return_wfn', False) ptype = kwargs.pop('ptype', None) return_total_data = kwargs.pop('return_total_data', False) molecule = kwargs.pop('molecule', core.get_active_molecule()) molecule.update_geometry() core.clean_variables() if ptype not in ['energy', 'gradient', 'hessian']: raise ValidationError( """N-Body driver: The ptype '%s' is not regonized.""" % ptype) # Figure out BSSE types do_cp = False do_nocp = False do_vmfc = False return_method = False # Must be passed bsse_type bsse_type_list = kwargs.pop('bsse_type') if bsse_type_list is None: raise ValidationError("N-Body GUFunc: Must pass a bsse_type") if not isinstance(bsse_type_list, list): bsse_type_list = [bsse_type_list] for num, btype in enumerate(bsse_type_list): if btype.lower() == 'cp': do_cp = True if (num == 0): return_method = 'cp' elif btype.lower() == 'nocp': do_nocp = True if (num == 0): return_method = 'nocp' elif btype.lower() == 'vmfc': do_vmfc = True if (num == 0): return_method = 'vmfc' else: raise ValidationError( "N-Body GUFunc: bsse_type '%s' is not recognized" % btype.lower()) max_nbody = kwargs.get('max_nbody', -1) max_frag = molecule.nfragments() if max_nbody == -1: max_nbody = molecule.nfragments() else: max_nbody = min(max_nbody, max_frag) # What levels do we need? 
nbody_range = range(1, max_nbody + 1) fragment_range = range(1, max_frag + 1) # Flip this off for now, needs more testing # If we are doing CP lets save them integrals #if 'cp' in bsse_type_list and (len(bsse_type_list) == 1): # # Set to save RI integrals for repeated full-basis computations # ri_ints_io = core.get_global_option('DF_INTS_IO') # # inquire if above at all applies to dfmp2 or just scf # core.set_global_option('DF_INTS_IO', 'SAVE') # psioh = core.IOManager.shared_object() # psioh.set_specific_retention(97, True) bsse_str = bsse_type_list[0] if len(bsse_type_list) > 1: bsse_str = str(bsse_type_list) core.print_out("\n\n") core.print_out(" ===> N-Body Interaction Abacus <===\n") core.print_out(" BSSE Treatment: %s\n" % bsse_str) cp_compute_list = {x: set() for x in nbody_range} nocp_compute_list = {x: set() for x in nbody_range} vmfc_compute_list = {x: set() for x in nbody_range} vmfc_level_list = {x: set() for x in nbody_range } # Need to sum something slightly different # Build up compute sets if do_cp: # Everything is in dimer basis basis_tuple = tuple(fragment_range) for nbody in nbody_range: for x in it.combinations(fragment_range, nbody): cp_compute_list[nbody].add((x, basis_tuple)) if do_nocp: # Everything in monomer basis for nbody in nbody_range: for x in it.combinations(fragment_range, nbody): nocp_compute_list[nbody].add((x, x)) if do_vmfc: # Like a CP for all combinations of pairs or greater for nbody in nbody_range: for cp_combos in it.combinations(fragment_range, nbody): basis_tuple = tuple(cp_combos) for interior_nbody in nbody_range: for x in it.combinations(cp_combos, interior_nbody): combo_tuple = (x, basis_tuple) vmfc_compute_list[interior_nbody].add(combo_tuple) vmfc_level_list[len(basis_tuple)].add(combo_tuple) # Build a comprehensive compute_range compute_list = {x: set() for x in nbody_range} for n in nbody_range: compute_list[n] |= cp_compute_list[n] compute_list[n] |= nocp_compute_list[n] compute_list[n] |= vmfc_compute_list[n] core.print_out(" Number of %d-body computations: %d\n" % (n, len(compute_list[n]))) # Build size and slices dictionaries fragment_size_dict = { frag: molecule.extract_subsets(frag).natom() for frag in range(1, max_frag + 1) } start = 0 fragment_slice_dict = {} for k, v in fragment_size_dict.items(): fragment_slice_dict[k] = slice(start, start + v) start += v molecule_total_atoms = sum(fragment_size_dict.values()) # Now compute the energies energies_dict = {} ptype_dict = {} for n in compute_list.keys(): core.print_out( "\n ==> N-Body: Now computing %d-body complexes <==\n\n" % n) print("\n ==> N-Body: Now computing %d-body complexes <==\n" % n) total = len(compute_list[n]) for num, pair in enumerate(compute_list[n]): core.print_out( "\n N-Body: Computing complex (%d/%d) with fragments %s in the basis of fragments %s.\n\n" % (num + 1, total, str(pair[0]), str(pair[1]))) ghost = list(set(pair[1]) - set(pair[0])) current_mol = molecule.extract_subsets(list(pair[0]), ghost) ptype_dict[pair] = func(method_string, molecule=current_mol, **kwargs) energies_dict[pair] = core.get_variable("CURRENT ENERGY") core.print_out( "\n N-Body: Complex Energy (fragments = %s, basis = %s: %20.14f)\n" % (str(pair[0]), str(pair[1]), energies_dict[pair])) # Flip this off for now, needs more testing #if 'cp' in bsse_type_list and (len(bsse_type_list) == 1): # core.set_global_option('DF_INTS_IO', 'LOAD') core.clean() # Final dictionaries cp_energy_by_level = {n: 0.0 for n in nbody_range} nocp_energy_by_level = {n: 0.0 for n in nbody_range} 
cp_energy_body_dict = {n: 0.0 for n in nbody_range} nocp_energy_body_dict = {n: 0.0 for n in nbody_range} vmfc_energy_body_dict = {n: 0.0 for n in nbody_range} # Build out ptype dictionaries if needed if ptype != 'energy': if ptype == 'gradient': arr_shape = (molecule_total_atoms, 3) elif ptype == 'hessian': arr_shape = (molecule_total_atoms * 3, molecule_total_atoms * 3) else: raise KeyError("N-Body: ptype '%s' not recognized" % ptype) cp_ptype_by_level = {n: np.zeros(arr_shape) for n in nbody_range} nocp_ptype_by_level = {n: np.zeros(arr_shape) for n in nbody_range} vmfc_ptype_by_level = {n: np.zeros(arr_shape) for n in nbody_range} cp_ptype_body_dict = {n: np.zeros(arr_shape) for n in nbody_range} nocp_ptype_body_dict = {n: np.zeros(arr_shape) for n in nbody_range} vmfc_ptype_body_dict = {n: np.zeros(arr_shape) for n in nbody_range} else: cp_ptype_by_level, cp_ptype_body_dict = None, None nocp_ptype_by_level, nocp_ptype_body_dict = None, None vmfc_ptype_body_dict = None # Sum up all of the levels for n in nbody_range: # Energy cp_energy_by_level[n] = sum(energies_dict[v] for v in cp_compute_list[n]) nocp_energy_by_level[n] = sum(energies_dict[v] for v in nocp_compute_list[n]) # Special vmfc case if n > 1: vmfc_energy_body_dict[n] = vmfc_energy_body_dict[n - 1] for tup in vmfc_level_list[n]: vmfc_energy_body_dict[n] += ( (-1)**(n - len(tup[0]))) * energies_dict[tup] # Do ptype if ptype != 'energy': _sum_cluster_ptype_data(ptype, ptype_dict, cp_compute_list[n], fragment_slice_dict, fragment_size_dict, cp_ptype_by_level[n]) _sum_cluster_ptype_data(ptype, ptype_dict, nocp_compute_list[n], fragment_slice_dict, fragment_size_dict, nocp_ptype_by_level[n]) _sum_cluster_ptype_data(ptype, ptype_dict, vmfc_level_list[n], fragment_slice_dict, fragment_size_dict, vmfc_ptype_by_level[n], vmfc=True) # Compute cp energy and ptype if do_cp: for n in nbody_range: if n == max_frag: cp_energy_body_dict[n] = cp_energy_by_level[n] if ptype != 'energy': cp_ptype_body_dict[n][:] = cp_ptype_by_level[n] continue for k in range(1, n + 1): take_nk = nCr(max_frag - k - 1, n - k) sign = ((-1)**(n - k)) value = cp_energy_by_level[k] cp_energy_body_dict[n] += take_nk * sign * value if ptype != 'energy': value = cp_ptype_by_level[k] cp_ptype_body_dict[n] += take_nk * sign * value _print_nbody_energy(cp_energy_body_dict, "Counterpoise Corrected (CP)") cp_interaction_energy = cp_energy_body_dict[ max_nbody] - cp_energy_body_dict[1] core.set_variable('Counterpoise Corrected Total Energy', cp_energy_body_dict[max_nbody]) core.set_variable('Counterpoise Corrected Interaction Energy', cp_interaction_energy) for n in nbody_range[1:]: var_key = 'CP-CORRECTED %d-BODY INTERACTION ENERGY' % n core.set_variable(var_key, cp_energy_body_dict[n] - cp_energy_body_dict[1]) # Compute nocp energy and ptype if do_nocp: for n in nbody_range: if n == max_frag: nocp_energy_body_dict[n] = nocp_energy_by_level[n] if ptype != 'energy': nocp_ptype_body_dict[n][:] = nocp_ptype_by_level[n] continue for k in range(1, n + 1): take_nk = nCr(max_frag - k - 1, n - k) sign = ((-1)**(n - k)) value = nocp_energy_by_level[k] nocp_energy_body_dict[n] += take_nk * sign * value if ptype != 'energy': value = nocp_ptype_by_level[k] nocp_ptype_body_dict[n] += take_nk * sign * value _print_nbody_energy(nocp_energy_body_dict, "Non-Counterpoise Corrected (NoCP)") nocp_interaction_energy = nocp_energy_body_dict[ max_nbody] - nocp_energy_body_dict[1] core.set_variable('Non-Counterpoise Corrected Total Energy', nocp_energy_body_dict[max_nbody]) 
core.set_variable('Non-Counterpoise Corrected Interaction Energy', nocp_interaction_energy) for n in nbody_range[1:]: var_key = 'NOCP-CORRECTED %d-BODY INTERACTION ENERGY' % n core.set_variable( var_key, nocp_energy_body_dict[n] - nocp_energy_body_dict[1]) # Compute vmfc energy and ptype if do_vmfc: _print_nbody_energy(vmfc_energy_body_dict, "Valiron-Mayer Function Couterpoise (VMFC)") vmfc_interaction_energy = vmfc_energy_body_dict[ max_nbody] - vmfc_energy_body_dict[1] core.set_variable('Valiron-Mayer Function Couterpoise Total Energy', vmfc_energy_body_dict[max_nbody]) core.set_variable( 'Valiron-Mayer Function Couterpoise Interaction Energy', vmfc_interaction_energy) for n in nbody_range[1:]: var_key = 'VMFC-CORRECTED %d-BODY INTERACTION ENERGY' % n core.set_variable( var_key, vmfc_energy_body_dict[n] - vmfc_energy_body_dict[1]) if return_method == 'cp': ptype_body_dict = cp_ptype_body_dict energy_body_dict = cp_energy_body_dict elif return_method == 'nocp': ptype_body_dict = nocp_ptype_body_dict energy_body_dict = nocp_energy_body_dict elif return_method == 'vmfc': ptype_body_dict = vmfc_ptype_body_dict energy_body_dict = vmfc_energy_body_dict else: raise ValidationError( "N-Body Wrapper: Invalid return type. Should never be here, please post this error on github." ) # Figure out and build return types if return_total_data: ret_energy = energy_body_dict[max_nbody] else: ret_energy = energy_body_dict[max_nbody] ret_energy -= energy_body_dict[1] if ptype != 'energy': if return_total_data: np_final_ptype = ptype_body_dict[max_nbody].copy() else: np_final_ptype = ptype_body_dict[max_nbody].copy() np_final_ptype -= ptype_body_dict[1] ret_ptype = core.Matrix.from_array(np_final_ptype) else: ret_ptype = ret_energy # Build and set a wavefunction wfn = core.Wavefunction.build(molecule, 'sto-3g') wfn.cdict["nbody_energy"] = energies_dict wfn.cdict["nbody_ptype"] = ptype_dict wfn.cdict["nbody_body_energy"] = energy_body_dict wfn.cdict["nbody_body_ptype"] = ptype_body_dict if ptype == 'gradient': wfn.set_gradient(ret_ptype) elif ptype == 'hessian': wfn.set_hessian(ret_ptype) core.set_variable("CURRENT ENERGY", ret_energy) if return_wfn: return (ret_ptype, wfn) else: return ret_ptype
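# Hedged usage sketch: in Psi4 this n-body wrapper is normally reached through the
# regular driver rather than called directly, e.g. by passing bsse_type to
# psi4.energy(); the neon-dimer geometry is just an illustration.
import psi4

nene = psi4.geometry("""
0 1
Ne 0.0 0.0 0.0
--
0 1
Ne 0.0 0.0 3.1
""")

# CP- and NoCP-corrected interaction energies; the first bsse_type entry is returned.
e_int = psi4.energy('scf/aug-cc-pvdz', molecule=nene, bsse_type=['cp', 'nocp'])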
def run_gaussian_2(name, **kwargs): # throw an exception for open-shells if (core.get_option('SCF','REFERENCE') != 'RHF' ): raise ValidationError("""g2 computations require "reference rhf".""") # stash user options: optstash = p4util.OptionsState( ['FNOCC','COMPUTE_TRIPLES'], ['FNOCC','COMPUTE_MP4_TRIPLES'], ['FREEZE_CORE'], ['MP2_TYPE'], ['SCF','SCF_TYPE']) # override default scf_type core.set_local_option('SCF','SCF_TYPE','PK') # optimize geometry at scf level core.clean() core.set_global_option('BASIS',"6-31G(D)") driver.optimize('scf') core.clean() # scf frequencies for zpe # NOTE This line should not be needed, but without it there's a seg fault scf_e, ref = driver.frequency('scf', return_wfn=True) # thermodynamic properties du = core.get_variable('INTERNAL ENERGY CORRECTION') dh = core.get_variable('ENTHALPY CORRECTION') dg = core.get_variable('GIBBS FREE ENERGY CORRECTION') freqs = ref.frequencies() nfreq = freqs.dim(0) freqsum = 0.0 for i in range(0, nfreq): freqsum += freqs.get(i) zpe = freqsum / p4const.psi_hartree2wavenumbers * 0.8929 * 0.5 core.clean() # optimize geometry at mp2 (no frozen core) level # note: freeze_core isn't an option in MP2 core.set_global_option('FREEZE_CORE',"FALSE") core.set_global_option('MP2_TYPE', 'CONV') driver.optimize('mp2') core.clean() # qcisd(t) core.set_local_option('FNOCC','COMPUTE_MP4_TRIPLES',"TRUE") core.set_global_option('FREEZE_CORE',"TRUE") core.set_global_option('BASIS',"6-311G(D_P)") ref = driver.proc.run_fnocc('qcisd(t)', return_wfn=True, **kwargs) # HLC: high-level correction based on number of valence electrons nirrep = ref.nirrep() frzcpi = ref.frzcpi() nfzc = 0 for i in range (0,nirrep): nfzc += frzcpi[i] nalpha = ref.nalpha() - nfzc nbeta = ref.nbeta() - nfzc # hlc of gaussian-2 hlc = -0.00481 * nalpha -0.00019 * nbeta # hlc of gaussian-1 hlc1 = -0.00614 * nalpha eqci_6311gdp = core.get_variable("QCISD(T) TOTAL ENERGY") emp4_6311gd = core.get_variable("MP4 TOTAL ENERGY") emp2_6311gd = core.get_variable("MP2 TOTAL ENERGY") core.clean() # correction for diffuse functions core.set_global_option('BASIS',"6-311+G(D_P)") driver.energy('mp4') emp4_6311pg_dp = core.get_variable("MP4 TOTAL ENERGY") emp2_6311pg_dp = core.get_variable("MP2 TOTAL ENERGY") core.clean() # correction for polarization functions core.set_global_option('BASIS',"6-311G(2DF_P)") driver.energy('mp4') emp4_6311g2dfp = core.get_variable("MP4 TOTAL ENERGY") emp2_6311g2dfp = core.get_variable("MP2 TOTAL ENERGY") core.clean() # big basis mp2 core.set_global_option('BASIS',"6-311+G(3DF_2P)") #run_fnocc('_mp2',**kwargs) driver.energy('mp2') emp2_big = core.get_variable("MP2 TOTAL ENERGY") core.clean() eqci = eqci_6311gdp e_delta_g2 = emp2_big + emp2_6311gd - emp2_6311g2dfp - emp2_6311pg_dp e_plus = emp4_6311pg_dp - emp4_6311gd e_2df = emp4_6311g2dfp - emp4_6311gd eg2 = eqci + e_delta_g2 + e_plus + e_2df eg2_mp2_0k = eqci + (emp2_big - emp2_6311gd) + hlc + zpe core.print_out('\n') core.print_out(' ==> G1/G2 Energy Components <==\n') core.print_out('\n') core.print_out(' QCISD(T): %20.12lf\n' % eqci) core.print_out(' E(Delta): %20.12lf\n' % e_delta_g2) core.print_out(' E(2DF): %20.12lf\n' % e_2df) core.print_out(' E(+): %20.12lf\n' % e_plus) core.print_out(' E(G1 HLC): %20.12lf\n' % hlc1) core.print_out(' E(G2 HLC): %20.12lf\n' % hlc) core.print_out(' E(ZPE): %20.12lf\n' % zpe) core.print_out('\n') core.print_out(' ==> 0 Kelvin Results <==\n') core.print_out('\n') eg2_0k = eg2 + zpe + hlc core.print_out(' G1: %20.12lf\n' % (eqci + e_plus + e_2df + hlc1 + zpe)) core.print_out(' 
G2(MP2): %20.12lf\n' % eg2_mp2_0k) core.print_out(' G2: %20.12lf\n' % eg2_0k) core.set_variable("G1 TOTAL ENERGY",eqci + e_plus + e_2df + hlc1 + zpe) core.set_variable("G2 TOTAL ENERGY",eg2_0k) core.set_variable("G2(MP2) TOTAL ENERGY",eg2_mp2_0k) core.print_out('\n') T = core.get_global_option('T') core.print_out(' ==> %3.0lf Kelvin Results <==\n'% T) core.print_out('\n') internal_energy = eg2_mp2_0k + du - zpe / 0.8929 enthalpy = eg2_mp2_0k + dh - zpe / 0.8929 gibbs = eg2_mp2_0k + dg - zpe / 0.8929 core.print_out(' G2(MP2) energy: %20.12lf\n' % internal_energy ) core.print_out(' G2(MP2) enthalpy: %20.12lf\n' % enthalpy) core.print_out(' G2(MP2) free energy: %20.12lf\n' % gibbs) core.print_out('\n') core.set_variable("G2(MP2) INTERNAL ENERGY",internal_energy) core.set_variable("G2(MP2) ENTHALPY",enthalpy) core.set_variable("G2(MP2) FREE ENERGY",gibbs) internal_energy = eg2_0k + du - zpe / 0.8929 enthalpy = eg2_0k + dh - zpe / 0.8929 gibbs = eg2_0k + dg - zpe / 0.8929 core.print_out(' G2 energy: %20.12lf\n' % internal_energy ) core.print_out(' G2 enthalpy: %20.12lf\n' % enthalpy) core.print_out(' G2 free energy: %20.12lf\n' % gibbs) core.set_variable("CURRENT ENERGY",eg2_0k) core.set_variable("G2 INTERNAL ENERGY",internal_energy) core.set_variable("G2 ENTHALPY",enthalpy) core.set_variable("G2 FREE ENERGY",gibbs) core.clean() optstash.restore() # return 0K g2 results return eg2_0k
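# Hedged usage sketch for the G2 composite recipe above: it is reached through the
# energy() driver (the method label 'gaussian-2' is assumed here and may differ by
# version), and its components land in the PSI variables set at the end of the routine.
import psi4

psi4.geometry("""
0 1
O
H 1 0.96
H 1 0.96 2 104.5
""")
e_g2 = psi4.energy('gaussian-2')
print(psi4.variable('G2 TOTAL ENERGY'), psi4.variable('G2(MP2) TOTAL ENERGY'))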
def multi_level(func, **kwargs):
    """
    Use different levels of theory for different expansion levels.
    See kwargs description in driver_nbody.nbody_gufunc.

    :returns: *return type of func* |w--w| The data.

    :returns: (*float*, :py:class:`~psi4.core.Wavefunction`) |w--w| data and wavefunction with energy/gradient/hessian set appropriately when **return_wfn** specified.

    """
    from psi4.driver.driver_nbody import _print_nbody_energy, nbody_gufunc

    ptype = kwargs['ptype']
    return_wfn = kwargs.get('return_wfn', False)
    kwargs['return_wfn'] = True
    levels = {}
    for k, v in kwargs.pop('levels').items():
        if isinstance(k, str):
            levels[k.lower()] = v
        else:
            levels[k] = v
    supersystem = levels.pop('supersystem', False)
    molecule = kwargs.get('molecule', core.get_active_molecule())
    kwargs['bsse_type'] = [kwargs['bsse_type']] if isinstance(kwargs['bsse_type'], str) else kwargs['bsse_type']
    natoms = molecule.natom()

    # Initialize with zeros
    energy_result, gradient_result, hessian_result = 0, None, None
    energy_body_contribution = {b: {} for b in kwargs['bsse_type']}
    energy_body_dict = {b: {} for b in kwargs['bsse_type']}
    wfns = {}
    if ptype in ['gradient', 'hessian']:
        gradient_result = np.zeros((natoms, 3))
    if ptype == 'hessian':
        hessian_result = np.zeros((natoms * 3, natoms * 3))

    if kwargs.get('charge_method', False) and not kwargs.get('embedding_charges', False):
        kwargs['embedding_charges'] = compute_charges(kwargs['charge_method'],
                                                      kwargs.get('charge_type', 'MULLIKEN_CHARGES').upper(), molecule)

    for n in sorted(levels)[::-1]:
        molecule.set_name('%i' % n)
        kwargs_copy = kwargs.copy()
        kwargs_copy['max_nbody'] = n
        energy_bsse_dict = {b: 0 for b in kwargs['bsse_type']}
        if isinstance(levels[n], str):
            # If a new level of theory is provided, compute contribution
            ret, wfn = nbody_gufunc(func, levels[n], **kwargs_copy)
            wfns[n] = wfn
        else:
            # For the n-body contribution, use available data from the higher order levels[n]-body
            wfn = wfns[levels[n]]

        for m in range(n - 1, n + 1):
            if m == 0:
                continue
            # Subtract the (n-1)-body contribution from the n-body contribution to get the n-body effect
            sign = (-1)**(1 - m // n)
            for b in kwargs['bsse_type']:
                energy_bsse_dict[b] += sign * wfn.variable('%i%s' % (m, b.lower()))
            if ptype in ['gradient', 'hessian']:
                gradient_result += sign * np.array(wfn.variable('GRADIENT ' + str(m)))
                # Keep 1-body contribution to compute interaction data
                if n == 1:
                    gradient1 = np.array(wfn.variable('GRADIENT ' + str(m)))
            if ptype == 'hessian':
                hessian_result += sign * np.array(wfn.variable('HESSIAN ' + str(m)))
                if n == 1:
                    hessian1 = np.array(wfn.variable('HESSIAN ' + str(m)))
        energy_result += energy_bsse_dict[kwargs['bsse_type'][0]]
        for b in kwargs['bsse_type']:
            energy_body_contribution[b][n] = energy_bsse_dict[b]

    if supersystem:
        # Super system recovers higher order effects at a lower level
        molecule.set_name('supersystem')
        kwargs_copy = kwargs.copy()
        kwargs_copy.pop('bsse_type')
        kwargs_copy.pop('ptype')
        ret, wfn_super = func(supersystem, **kwargs_copy)
        core.clean()
        kwargs_copy = kwargs.copy()
        kwargs_copy['bsse_type'] = 'nocp'
        kwargs_copy['max_nbody'] = max(levels)
        # Subtract lower order effects to avoid double counting
        ret, wfn = nbody_gufunc(func, supersystem, **kwargs_copy)
        energy_result += wfn_super.energy() - wfn.variable(str(max(levels)))
        for b in kwargs['bsse_type']:
            energy_body_contribution[b][molecule.nfragments()] = wfn_super.energy() - wfn.variable(
                str(max(levels)))

        if ptype in ['gradient', 'hessian']:
            gradient_result += np.array(wfn_super.gradient()) - np.array(wfn.variable('GRADIENT ' + str(max(levels))))
        if ptype == 'hessian':
            hessian_result += np.array(wfn_super.hessian()) - np.array(wfn.variable('HESSIAN ' + str(max(levels))))
        levels['supersystem'] = supersystem

    for b in kwargs['bsse_type']:
        for n in energy_body_contribution[b]:
            energy_body_dict[b][n] = sum(
                [energy_body_contribution[b][i] for i in range(1, n + 1) if i in energy_body_contribution[b]])

    is_embedded = kwargs.get('embedding_charges', False) or kwargs.get('charge_method', False)
    for b in kwargs['bsse_type']:
        _print_nbody_energy(energy_body_dict[b], '%s-corrected multilevel many-body expansion' % b.upper(),
                            is_embedded)

    if not kwargs['return_total_data']:
        # Remove monomer contribution for interaction data
        energy_result -= energy_body_dict[kwargs['bsse_type'][0]][1]
        if ptype in ['gradient', 'hessian']:
            gradient_result -= gradient1
        if ptype == 'hessian':
            hessian_result -= hessian1

    wfn_out = core.Wavefunction.build(molecule, 'def2-svp')
    core.set_variable("CURRENT ENERGY", energy_result)
    wfn_out.set_variable("CURRENT ENERGY", energy_result)
    if gradient_result is not None:
        wfn_out.set_gradient(core.Matrix.from_array(gradient_result))
    if hessian_result is not None:
        wfn_out.set_hessian(core.Matrix.from_array(hessian_result))
    ptype_result = eval(ptype + '_result')
    for b in kwargs['bsse_type']:
        for i in energy_body_dict[b]:
            wfn_out.set_variable(str(i) + b, energy_body_dict[b][i])

    if return_wfn:
        return (ptype_result, wfn_out)
    else:
        return ptype_result
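
# Illustrative aside (not part of the Psi4 source): the sign factor in the m-loop
# above is terse, so the plain-Python check below makes the convention explicit.
# For the pair m in (n-1, n), m // n is 0 when m = n-1 and 1 when m = n, so the
# full n-body term enters with +1 and the (n-1)-body term with -1; their
# difference isolates the pure n-body effect. The helper name is hypothetical.
def _demo_nbody_sign(n=3):
    for m in (n - 1, n):
        sign = (-1) ** (1 - m // n)
        print("m = %d: sign = %+d" % (m, sign))
    # For n = 3 this prints:  m = 2: sign = -1   then   m = 3: sign = +1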
def nbody_gufunc(func, method_string, **kwargs):
    """
    Computes the nbody interaction energy, gradient, or Hessian depending on input.
    This is a generalized universal function for computing interaction quantities.

    :returns: *return type of func* |w--w| The interaction data.

    :returns: (*float*, :py:class:`~psi4.core.Wavefunction`) |w--w| interaction data and wavefunction with energy/gradient/hessian set appropriately when **return_wfn** specified.

    :type func: function
    :param func: ``energy`` || etc.

        Python function that accepts method_string and a molecule. Returns an
        energy, gradient, or Hessian as requested.

    :type method_string: string
    :param method_string: ``'scf'`` || ``'mp2'`` || ``'ci5'`` || etc.

        First argument, lowercase and usually unlabeled. Indicates the computational
        method to be passed to func.

    :type molecule: :ref:`molecule <op_py_molecule>`
    :param molecule: ``h2o`` || etc.

        The target molecule, if not the last molecule defined.

    :type return_wfn: :ref:`boolean <op_py_boolean>`
    :param return_wfn: ``'on'`` || |dl| ``'off'`` |dr|

        Indicate to additionally return the :py:class:`~psi4.core.Wavefunction`
        calculation result as the second element of a tuple.

    :type bsse_type: string or list
    :param bsse_type: ``'cp'`` || ``['nocp', 'vmfc']`` || |dl| ``None`` |dr| || etc.

        Type of BSSE correction to compute: CP, NoCP, or VMFC. The first in this
        list is returned by this function. By default, this function is not called.

    :type max_nbody: int
    :param max_nbody: ``3`` || etc.

        Maximum n-body to compute, cannot exceed the number of fragments in the molecule.

    :type ptype: string
    :param ptype: ``'energy'`` || ``'gradient'`` || ``'hessian'``

        Type of the procedure passed in.

    :type return_total_data: :ref:`boolean <op_py_boolean>`
    :param return_total_data: ``'on'`` || |dl| ``'off'`` |dr|

        If True returns the total data (energy/gradient/etc) of the system,
        otherwise returns interaction data.
    """

    ### ==> Parse some kwargs <==
    kwargs = p4util.kwargs_lower(kwargs)
    return_wfn = kwargs.pop('return_wfn', False)
    ptype = kwargs.pop('ptype', None)
    return_total_data = kwargs.pop('return_total_data', False)
    molecule = kwargs.pop('molecule', core.get_active_molecule())
    molecule.update_geometry()
    core.clean_variables()

    if ptype not in ['energy', 'gradient', 'hessian']:
        raise ValidationError("""N-Body driver: The ptype '%s' is not recognized.""" % ptype)

    # Figure out BSSE types
    do_cp = False
    do_nocp = False
    do_vmfc = False
    return_method = False

    # Must be passed bsse_type
    bsse_type_list = kwargs.pop('bsse_type')
    if bsse_type_list is None:
        raise ValidationError("N-Body GUFunc: Must pass a bsse_type")
    if not isinstance(bsse_type_list, list):
        bsse_type_list = [bsse_type_list]

    for num, btype in enumerate(bsse_type_list):
        if btype.lower() == 'cp':
            do_cp = True
            if (num == 0): return_method = 'cp'
        elif btype.lower() == 'nocp':
            do_nocp = True
            if (num == 0): return_method = 'nocp'
        elif btype.lower() == 'vmfc':
            do_vmfc = True
            if (num == 0): return_method = 'vmfc'
        else:
            raise ValidationError("N-Body GUFunc: bsse_type '%s' is not recognized" % btype.lower())

    max_nbody = kwargs.get('max_nbody', -1)
    max_frag = molecule.nfragments()
    if max_nbody == -1:
        max_nbody = molecule.nfragments()
    else:
        max_nbody = min(max_nbody, max_frag)

    # What levels do we need?
    nbody_range = range(1, max_nbody + 1)
    fragment_range = range(1, max_frag + 1)

    # Flip this off for now, needs more testing
    # If we are doing CP lets save them integrals
    #if 'cp' in bsse_type_list and (len(bsse_type_list) == 1):
    #    # Set to save RI integrals for repeated full-basis computations
    #    ri_ints_io = core.get_global_option('DF_INTS_IO')
    #    # inquire if above at all applies to dfmp2 or just scf
    #    core.set_global_option('DF_INTS_IO', 'SAVE')
    #    psioh = core.IOManager.shared_object()
    #    psioh.set_specific_retention(97, True)

    bsse_str = bsse_type_list[0]
    if len(bsse_type_list) > 1:
        bsse_str = str(bsse_type_list)
    core.print_out("\n\n")
    core.print_out(" ===> N-Body Interaction Abacus <===\n")
    core.print_out(" BSSE Treatment: %s\n" % bsse_str)

    cp_compute_list = {x: set() for x in nbody_range}
    nocp_compute_list = {x: set() for x in nbody_range}
    vmfc_compute_list = {x: set() for x in nbody_range}
    vmfc_level_list = {x: set() for x in nbody_range}  # Need to sum something slightly different

    # Build up compute sets
    if do_cp:
        # Everything is in dimer basis
        basis_tuple = tuple(fragment_range)
        for nbody in nbody_range:
            for x in it.combinations(fragment_range, nbody):
                cp_compute_list[nbody].add((x, basis_tuple))

    if do_nocp:
        # Everything in monomer basis
        for nbody in nbody_range:
            for x in it.combinations(fragment_range, nbody):
                nocp_compute_list[nbody].add((x, x))

    if do_vmfc:
        # Like a CP for all combinations of pairs or greater
        for nbody in nbody_range:
            for cp_combos in it.combinations(fragment_range, nbody):
                basis_tuple = tuple(cp_combos)
                for interior_nbody in nbody_range:
                    for x in it.combinations(cp_combos, interior_nbody):
                        combo_tuple = (x, basis_tuple)
                        vmfc_compute_list[interior_nbody].add(combo_tuple)
                        vmfc_level_list[len(basis_tuple)].add(combo_tuple)

    # Build a comprehensive compute_range
    compute_list = {x: set() for x in nbody_range}
    for n in nbody_range:
        compute_list[n] |= cp_compute_list[n]
        compute_list[n] |= nocp_compute_list[n]
        compute_list[n] |= vmfc_compute_list[n]
        core.print_out(" Number of %d-body computations: %d\n" % (n, len(compute_list[n])))

    # Build size and slices dictionaries
    fragment_size_dict = {frag: molecule.extract_subsets(frag).natom() for frag in range(1, max_frag + 1)}

    start = 0
    fragment_slice_dict = {}
    for k, v in fragment_size_dict.items():
        fragment_slice_dict[k] = slice(start, start + v)
        start += v

    molecule_total_atoms = sum(fragment_size_dict.values())

    # Now compute the energies
    energies_dict = {}
    ptype_dict = {}
    for n in compute_list.keys():
        core.print_out("\n ==> N-Body: Now computing %d-body complexes <==\n\n" % n)
        total = len(compute_list[n])
        for num, pair in enumerate(compute_list[n]):
            core.print_out("\n N-Body: Computing complex (%d/%d) with fragments %s in the basis of fragments %s.\n\n" %
                           (num + 1, total, str(pair[0]), str(pair[1])))
            ghost = list(set(pair[1]) - set(pair[0]))

            current_mol = molecule.extract_subsets(list(pair[0]), ghost)
            ptype_dict[pair] = func(method_string, molecule=current_mol, **kwargs)
            energies_dict[pair] = core.get_variable("CURRENT ENERGY")
            core.print_out("\n N-Body: Complex Energy (fragments = %s, basis = %s: %20.14f)\n" %
                           (str(pair[0]), str(pair[1]), energies_dict[pair]))

            # Flip this off for now, needs more testing
            #if 'cp' in bsse_type_list and (len(bsse_type_list) == 1):
            #    core.set_global_option('DF_INTS_IO', 'LOAD')

            core.clean()

    # Final dictionaries
    cp_energy_by_level = {n: 0.0 for n in nbody_range}
    nocp_energy_by_level = {n: 0.0 for n in nbody_range}

    cp_energy_body_dict = {n: 0.0 for n in nbody_range}
    nocp_energy_body_dict = {n: 0.0 for n in nbody_range}
    vmfc_energy_body_dict = {n: 0.0 for n in nbody_range}

    # Build out ptype dictionaries if needed
    if ptype != 'energy':
        if ptype == 'gradient':
            arr_shape = (molecule_total_atoms, 3)
        elif ptype == 'hessian':
            arr_shape = (molecule_total_atoms * 3, molecule_total_atoms * 3)
        else:
            raise KeyError("N-Body: ptype '%s' not recognized" % ptype)

        cp_ptype_by_level = {n: np.zeros(arr_shape) for n in nbody_range}
        nocp_ptype_by_level = {n: np.zeros(arr_shape) for n in nbody_range}
        vmfc_ptype_by_level = {n: np.zeros(arr_shape) for n in nbody_range}

        cp_ptype_body_dict = {n: np.zeros(arr_shape) for n in nbody_range}
        nocp_ptype_body_dict = {n: np.zeros(arr_shape) for n in nbody_range}
        vmfc_ptype_body_dict = {n: np.zeros(arr_shape) for n in nbody_range}
    else:
        cp_ptype_by_level, cp_ptype_body_dict = None, None
        nocp_ptype_by_level, nocp_ptype_body_dict = None, None
        vmfc_ptype_body_dict = None

    # Sum up all of the levels
    for n in nbody_range:
        # Energy
        cp_energy_by_level[n] = sum(energies_dict[v] for v in cp_compute_list[n])
        nocp_energy_by_level[n] = sum(energies_dict[v] for v in nocp_compute_list[n])

        # Special vmfc case
        if n > 1:
            vmfc_energy_body_dict[n] = vmfc_energy_body_dict[n - 1]
        for tup in vmfc_level_list[n]:
            vmfc_energy_body_dict[n] += ((-1)**(n - len(tup[0]))) * energies_dict[tup]

        # Do ptype
        if ptype != 'energy':
            _sum_cluster_ptype_data(ptype, ptype_dict, cp_compute_list[n],
                                    fragment_slice_dict, fragment_size_dict,
                                    cp_ptype_by_level[n])
            _sum_cluster_ptype_data(ptype, ptype_dict, nocp_compute_list[n],
                                    fragment_slice_dict, fragment_size_dict,
                                    nocp_ptype_by_level[n])
            _sum_cluster_ptype_data(ptype, ptype_dict, vmfc_level_list[n],
                                    fragment_slice_dict, fragment_size_dict,
                                    vmfc_ptype_by_level[n], vmfc=True)

    # Compute cp energy and ptype
    if do_cp:
        for n in nbody_range:
            if n == max_frag:
                cp_energy_body_dict[n] = cp_energy_by_level[n]
                if ptype != 'energy':
                    cp_ptype_body_dict[n][:] = cp_ptype_by_level[n]
                continue

            for k in range(1, n + 1):
                take_nk = nCr(max_frag - k - 1, n - k)
                sign = ((-1)**(n - k))
                value = cp_energy_by_level[k]
                cp_energy_body_dict[n] += take_nk * sign * value

                if ptype != 'energy':
                    value = cp_ptype_by_level[k]
                    cp_ptype_body_dict[n] += take_nk * sign * value

        _print_nbody_energy(cp_energy_body_dict, "Counterpoise Corrected (CP)")
        cp_interaction_energy = cp_energy_body_dict[max_nbody] - cp_energy_body_dict[1]
        core.set_variable('Counterpoise Corrected Total Energy', cp_energy_body_dict[max_nbody])
        core.set_variable('Counterpoise Corrected Interaction Energy', cp_interaction_energy)

        for n in nbody_range[1:]:
            var_key = 'CP-CORRECTED %d-BODY INTERACTION ENERGY' % n
            core.set_variable(var_key, cp_energy_body_dict[n] - cp_energy_body_dict[1])

    # Compute nocp energy and ptype
    if do_nocp:
        for n in nbody_range:
            if n == max_frag:
                nocp_energy_body_dict[n] = nocp_energy_by_level[n]
                if ptype != 'energy':
                    nocp_ptype_body_dict[n][:] = nocp_ptype_by_level[n]
                continue

            for k in range(1, n + 1):
                take_nk = nCr(max_frag - k - 1, n - k)
                sign = ((-1)**(n - k))
                value = nocp_energy_by_level[k]
                nocp_energy_body_dict[n] += take_nk * sign * value

                if ptype != 'energy':
                    value = nocp_ptype_by_level[k]
                    nocp_ptype_body_dict[n] += take_nk * sign * value

        _print_nbody_energy(nocp_energy_body_dict, "Non-Counterpoise Corrected (NoCP)")
        nocp_interaction_energy = nocp_energy_body_dict[max_nbody] - nocp_energy_body_dict[1]
        core.set_variable('Non-Counterpoise Corrected Total Energy', nocp_energy_body_dict[max_nbody])
        core.set_variable('Non-Counterpoise Corrected Interaction Energy', nocp_interaction_energy)

        for n in nbody_range[1:]:
            var_key = 'NOCP-CORRECTED %d-BODY INTERACTION ENERGY' % n
            core.set_variable(var_key, nocp_energy_body_dict[n] - nocp_energy_body_dict[1])

    # Compute vmfc energy and ptype
    if do_vmfc:
        _print_nbody_energy(vmfc_energy_body_dict, "Valiron-Mayer Function Counterpoise (VMFC)")
        vmfc_interaction_energy = vmfc_energy_body_dict[max_nbody] - vmfc_energy_body_dict[1]
        core.set_variable('Valiron-Mayer Function Counterpoise Total Energy', vmfc_energy_body_dict[max_nbody])
        core.set_variable('Valiron-Mayer Function Counterpoise Interaction Energy', vmfc_interaction_energy)

        for n in nbody_range[1:]:
            var_key = 'VMFC-CORRECTED %d-BODY INTERACTION ENERGY' % n
            core.set_variable(var_key, vmfc_energy_body_dict[n] - vmfc_energy_body_dict[1])

    if return_method == 'cp':
        ptype_body_dict = cp_ptype_body_dict
        energy_body_dict = cp_energy_body_dict
    elif return_method == 'nocp':
        ptype_body_dict = nocp_ptype_body_dict
        energy_body_dict = nocp_energy_body_dict
    elif return_method == 'vmfc':
        ptype_body_dict = vmfc_ptype_body_dict
        energy_body_dict = vmfc_energy_body_dict
    else:
        raise ValidationError(
            "N-Body Wrapper: Invalid return type. Should never be here, please post this error on github.")

    # Figure out and build return types
    if return_total_data:
        ret_energy = energy_body_dict[max_nbody]
    else:
        ret_energy = energy_body_dict[max_nbody]
        ret_energy -= energy_body_dict[1]

    if ptype != 'energy':
        if return_total_data:
            np_final_ptype = ptype_body_dict[max_nbody].copy()
        else:
            np_final_ptype = ptype_body_dict[max_nbody].copy()
            np_final_ptype -= ptype_body_dict[1]

        ret_ptype = core.Matrix.from_array(np_final_ptype)
    else:
        ret_ptype = ret_energy

    # Build and set a wavefunction
    wfn = core.Wavefunction.build(molecule, 'sto-3g')
    wfn.nbody_energy = energies_dict
    wfn.nbody_ptype = ptype_dict
    wfn.nbody_body_energy = energy_body_dict
    wfn.nbody_body_ptype = ptype_body_dict

    if ptype == 'gradient':
        wfn.set_gradient(ret_ptype)
    elif ptype == 'hessian':
        wfn.set_hessian(ret_ptype)

    core.set_variable("CURRENT ENERGY", ret_energy)

    if return_wfn:
        return (ret_ptype, wfn)
    else:
        return ret_ptype
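
# Illustrative usage sketch (not part of the Psi4 source). It assumes a working
# Psi4 installation and that nbody_gufunc is importable from
# psi4.driver.driver_nbody, as in the multi_level import above. In normal use
# this routine is reached through the psi4.energy(...) driver with a bsse_type
# keyword; the direct call below simply mirrors the keyword names parsed at the
# top of nbody_gufunc. The helium-dimer geometry and helper name are illustrative.
def _demo_nbody_gufunc():
    import psi4
    from psi4.driver.driver_nbody import nbody_gufunc

    he_dimer = psi4.geometry("""
    He 0.0 0.0 0.0
    --
    He 0.0 0.0 3.0
    """)
    psi4.set_options({"basis": "aug-cc-pvdz"})

    # CP-corrected interaction energy up to the 2-body level; with
    # return_total_data left at its default (False) the 1-body sum is removed.
    e_int = nbody_gufunc(psi4.energy, 'scf',
                         molecule=he_dimer,
                         bsse_type='cp',
                         ptype='energy',
                         max_nbody=2)
    return e_int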
def run_gaussian_2(name, **kwargs):

    # throw an exception for open-shells
    if (core.get_option('SCF', 'REFERENCE') != 'RHF'):
        raise ValidationError("""g2 computations require "reference rhf".""")

    # stash user options:
    optstash = p4util.OptionsState(
        ['FNOCC', 'COMPUTE_TRIPLES'],
        ['FNOCC', 'COMPUTE_MP4_TRIPLES'],
        ['BASIS'],
        ['FREEZE_CORE'],
        ['MP2_TYPE'],
        ['SCF_TYPE'])

    # override default scf_type
    core.set_global_option('SCF_TYPE', 'PK')

    # optimize geometry at scf level
    core.clean()
    core.set_global_option('BASIS', "6-31G(D)")
    driver.optimize('scf')
    core.clean()

    # scf frequencies for zpe
    # NOTE This line should not be needed, but without it there's a seg fault
    scf_e, ref = driver.frequency('scf', return_wfn=True)

    # thermodynamic properties
    du = core.variable('THERMAL ENERGY CORRECTION')
    dh = core.variable('ENTHALPY CORRECTION')
    dg = core.variable('GIBBS FREE ENERGY CORRECTION')

    freqs = ref.frequencies()
    nfreq = freqs.dim(0)
    freqsum = 0.0
    for i in range(0, nfreq):
        freqsum += freqs.get(i)
    zpe = freqsum / constants.hartree2wavenumbers * 0.8929 * 0.5
    core.clean()

    # optimize geometry at mp2 (no frozen core) level
    # note: freeze_core isn't an option in MP2
    core.set_global_option('FREEZE_CORE', "FALSE")
    core.set_global_option('MP2_TYPE', 'CONV')
    driver.optimize('mp2')
    core.clean()

    # qcisd(t)
    core.set_local_option('FNOCC', 'COMPUTE_MP4_TRIPLES', "TRUE")
    core.set_global_option('FREEZE_CORE', "TRUE")
    core.set_global_option('BASIS', "6-311G(D_P)")
    ref = driver.proc.run_fnocc('qcisd(t)', return_wfn=True, **kwargs)

    # HLC: high-level correction based on number of valence electrons
    nirrep = ref.nirrep()
    frzcpi = ref.frzcpi()
    nfzc = 0
    for i in range(0, nirrep):
        nfzc += frzcpi[i]
    nalpha = ref.nalpha() - nfzc
    nbeta = ref.nbeta() - nfzc
    # hlc of gaussian-2
    hlc = -0.00481 * nalpha - 0.00019 * nbeta
    # hlc of gaussian-1
    hlc1 = -0.00614 * nalpha
    eqci_6311gdp = core.variable("QCISD(T) TOTAL ENERGY")
    emp4_6311gd = core.variable("MP4 TOTAL ENERGY")
    emp2_6311gd = core.variable("MP2 TOTAL ENERGY")
    core.clean()

    # correction for diffuse functions
    core.set_global_option('BASIS', "6-311+G(D_P)")
    driver.energy('mp4')
    emp4_6311pg_dp = core.variable("MP4 TOTAL ENERGY")
    emp2_6311pg_dp = core.variable("MP2 TOTAL ENERGY")
    core.clean()

    # correction for polarization functions
    core.set_global_option('BASIS', "6-311G(2DF_P)")
    driver.energy('mp4')
    emp4_6311g2dfp = core.variable("MP4 TOTAL ENERGY")
    emp2_6311g2dfp = core.variable("MP2 TOTAL ENERGY")
    core.clean()

    # big basis mp2
    core.set_global_option('BASIS', "6-311+G(3DF_2P)")
    #run_fnocc('_mp2',**kwargs)
    driver.energy('mp2')
    emp2_big = core.variable("MP2 TOTAL ENERGY")
    core.clean()

    eqci = eqci_6311gdp
    e_delta_g2 = emp2_big + emp2_6311gd - emp2_6311g2dfp - emp2_6311pg_dp
    e_plus = emp4_6311pg_dp - emp4_6311gd
    e_2df = emp4_6311g2dfp - emp4_6311gd

    eg2 = eqci + e_delta_g2 + e_plus + e_2df
    eg2_mp2_0k = eqci + (emp2_big - emp2_6311gd) + hlc + zpe

    core.print_out('\n')
    core.print_out(' ==> G1/G2 Energy Components <==\n')
    core.print_out('\n')
    core.print_out(' QCISD(T): %20.12lf\n' % eqci)
    core.print_out(' E(Delta): %20.12lf\n' % e_delta_g2)
    core.print_out(' E(2DF): %20.12lf\n' % e_2df)
    core.print_out(' E(+): %20.12lf\n' % e_plus)
    core.print_out(' E(G1 HLC): %20.12lf\n' % hlc1)
    core.print_out(' E(G2 HLC): %20.12lf\n' % hlc)
    core.print_out(' E(ZPE): %20.12lf\n' % zpe)
    core.print_out('\n')
    core.print_out(' ==> 0 Kelvin Results <==\n')
    core.print_out('\n')

    eg2_0k = eg2 + zpe + hlc
    core.print_out(' G1: %20.12lf\n' % (eqci + e_plus + e_2df + hlc1 + zpe))
    core.print_out(' G2(MP2): %20.12lf\n' % eg2_mp2_0k)
    core.print_out(' G2: %20.12lf\n' % eg2_0k)

    core.set_variable("G1 TOTAL ENERGY", eqci + e_plus + e_2df + hlc1 + zpe)
    core.set_variable("G2 TOTAL ENERGY", eg2_0k)
    core.set_variable("G2(MP2) TOTAL ENERGY", eg2_mp2_0k)

    core.print_out('\n')
    T = core.get_global_option('T')
    core.print_out(' ==> %3.0lf Kelvin Results <==\n' % T)
    core.print_out('\n')

    internal_energy = eg2_mp2_0k + du - zpe / 0.8929
    enthalpy = eg2_mp2_0k + dh - zpe / 0.8929
    gibbs = eg2_mp2_0k + dg - zpe / 0.8929

    core.print_out(' G2(MP2) energy: %20.12lf\n' % internal_energy)
    core.print_out(' G2(MP2) enthalpy: %20.12lf\n' % enthalpy)
    core.print_out(' G2(MP2) free energy: %20.12lf\n' % gibbs)
    core.print_out('\n')

    core.set_variable("G2(MP2) INTERNAL ENERGY", internal_energy)
    core.set_variable("G2(MP2) ENTHALPY", enthalpy)
    core.set_variable("G2(MP2) FREE ENERGY", gibbs)

    internal_energy = eg2_0k + du - zpe / 0.8929
    enthalpy = eg2_0k + dh - zpe / 0.8929
    gibbs = eg2_0k + dg - zpe / 0.8929

    core.print_out(' G2 energy: %20.12lf\n' % internal_energy)
    core.print_out(' G2 enthalpy: %20.12lf\n' % enthalpy)
    core.print_out(' G2 free energy: %20.12lf\n' % gibbs)

    core.set_variable("CURRENT ENERGY", eg2_0k)
    core.set_variable("G2 INTERNAL ENERGY", internal_energy)
    core.set_variable("G2 ENTHALPY", enthalpy)
    core.set_variable("G2 FREE ENERGY", gibbs)

    core.clean()
    optstash.restore()

    # return 0K g2 results
    return eg2_0k
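
# Illustrative usage sketch (not part of the Psi4 source). It assumes this
# routine is registered with the energy driver under the method name
# 'gaussian-2', and that a closed-shell molecule with an RHF reference is
# active; the water geometry and helper name are illustrative only. The basis
# sets are set internally by run_gaussian_2, so none needs to be specified.
def _demo_gaussian_2():
    import psi4

    psi4.geometry("""
    0 1
    O
    H 1 0.96
    H 1 0.96 2 104.5
    """)
    psi4.set_options({"reference": "rhf"})

    # Returns the 0 K G2 energy (eg2_0k above); G2(MP2) and the thermochemistry
    # are afterwards available as Psi4 variables such as "G2(MP2) TOTAL ENERGY".
    return psi4.energy('gaussian-2')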